"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
__snake_case = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_a = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
_a = int(re.match(R'''.*layer_(\d*).*''', __lowerCAmelCase )[1] )
layer_number -= 3
return f'h.{layer_number}.' + key
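# Illustrative example (the shard file name below is hypothetical, following the
# usual Megatron-DeepSpeed "layer_NN-model_MM-model_states.pt" pattern): block 4
# minus the offset of 3 non-transformer layers maps to transformers block 1:
#
#   layer_name_mapping("input_layernorm.weight", "layer_04-model_00-model_states.pt")
#   # -> "h.1.input_layernorm.weight"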
def get_dtype_size(dtype):
    """Return the size in bytes of a single element of `dtype`."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
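# Quick sanity check of get_dtype_size: the regex pulls the trailing bit width out
# of str(dtype) and divides by 8, while torch.bool is special-cased to 1/8 byte.
#
#   assert get_dtype_size(torch.float16) == 2  # "torch.float16" -> 16 bits
#   assert get_dtype_size(torch.float32) == 4  # "torch.float32" -> 32 bits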
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
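# Example invocation (script and checkpoint paths are hypothetical):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /data/bloom/global_step95000 \
#       --pytorch_dump_folder_path ./bloom-converted \
#       --shard_model \
#       --pretraining_tp 4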
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue_model_parallelism.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "roberta-large",
            "instance_type": "ml.p3dn.24xlarge",
            "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
        },
    ]
)
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
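# These SageMaker tests are gated behind the TEST_SAGEMAKER env var; a typical
# invocation (illustrative, assuming the usual transformers test layout) would be:
#
#   TEST_SAGEMAKER=True python -m pytest -s -v ./tests/sagemaker/test_multi_node_model_parallel.py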
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
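# Worked example: 49/98 is "digit cancelling" because naively striking the shared
# digit 9 gives 4/8, which really does equal 49/98:
#
#   assert is_digit_cancelling(49, 98)      # 49 % 10 == 98 // 10 == 9 and 4/8 == 49/98
#   assert not is_digit_cancelling(30, 50)  # 30 % 10 != 50 // 10, short-circuits safely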
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
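# For reference: the four non-trivial digit-cancelling fractions are 16/64, 19/95,
# 26/65 and 49/98; their product is 1/100, so solution() returns 100
# (the Project Euler 33 answer).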
"""Longformer configuration"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging


if TYPE_CHECKING:
    from ...onnx.config import PatchingSpec
    from ...tokenization_utils_base import PreTrainedTokenizerBase


logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
    "allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
    "allenai/longformer-large-4096-finetuned-triviaqa": (
        "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
    ),
    "allenai/longformer-base-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
    "allenai/longformer-large-4096-extra.pos.embd.only": (
        "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
    ),
}


class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        """Constructs LongformerConfig."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export


class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support the tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
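# Minimal usage sketch (the parameter values below are illustrative, not defaults
# taken from any released checkpoint):
#
#   config = LongformerConfig(attention_window=[256] * 12, max_position_embeddings=4098)
#   onnx_config = LongformerOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask / global_attention_mask axes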
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
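# Usage note: with _LazyModule, importing from this package stays cheap until a
# symbol is actually touched, e.g. (the checkpoint name below is illustrative):
#
#   from transformers import BlipProcessor, BlipForConditionalGeneration
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")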
def is_isogram(string: str) -> bool:
    """Return True if no letter repeats in `string` (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
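# Examples: "Uncopyrightable" uses 15 distinct letters, so it is an isogram;
# "isograms" repeats the letter "s", so it is not.
#
#   assert is_isogram("Uncopyrightable")
#   assert not is_isogram("isograms")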
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Optional[int] ) ->Any:
lowerCamelCase__ : Optional[int] =checkpoint
lowerCamelCase__ : List[Any] ={}
lowerCamelCase__ : Tuple =vae_state_dict['encoder.conv_in.weight']
lowerCamelCase__ : Union[str, Any] =vae_state_dict['encoder.conv_in.bias']
lowerCamelCase__ : Tuple =vae_state_dict['encoder.conv_out.weight']
lowerCamelCase__ : List[Any] =vae_state_dict['encoder.conv_out.bias']
lowerCamelCase__ : Any =vae_state_dict['encoder.norm_out.weight']
lowerCamelCase__ : str =vae_state_dict['encoder.norm_out.bias']
lowerCamelCase__ : Optional[int] =vae_state_dict['decoder.conv_in.weight']
lowerCamelCase__ : int =vae_state_dict['decoder.conv_in.bias']
lowerCamelCase__ : List[Any] =vae_state_dict['decoder.conv_out.weight']
lowerCamelCase__ : Dict =vae_state_dict['decoder.conv_out.bias']
lowerCamelCase__ : List[str] =vae_state_dict['decoder.norm_out.weight']
lowerCamelCase__ : Optional[int] =vae_state_dict['decoder.norm_out.bias']
lowerCamelCase__ : Optional[Any] =vae_state_dict['quant_conv.weight']
lowerCamelCase__ : int =vae_state_dict['quant_conv.bias']
lowerCamelCase__ : str =vae_state_dict['post_quant_conv.weight']
lowerCamelCase__ : str =vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
lowerCamelCase__ : Optional[Any] =len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
lowerCamelCase__ : Tuple ={
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(__lowerCAmelCase )
}
# Retrieves the keys for the decoder up blocks only
lowerCamelCase__ : int =len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
lowerCamelCase__ : Optional[int] ={
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(__lowerCAmelCase )
}
for i in range(__lowerCAmelCase ):
lowerCamelCase__ : List[Any] =[key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
lowerCamelCase__ : Any =vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
lowerCamelCase__ : Optional[Any] =vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
lowerCamelCase__ : Tuple =renew_vae_resnet_paths(__lowerCAmelCase )
lowerCamelCase__ : List[Any] ={'old': f"""down.{i}.block""", 'new': f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
lowerCamelCase__ : Dict =[key for key in vae_state_dict if 'encoder.mid.block' in key]
lowerCamelCase__ : Dict =2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCamelCase__ : Optional[int] =[key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
lowerCamelCase__ : str =renew_vae_resnet_paths(__lowerCAmelCase )
lowerCamelCase__ : int ={'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
lowerCamelCase__ : int =[key for key in vae_state_dict if 'encoder.mid.attn' in key]
lowerCamelCase__ : int =renew_vae_attention_paths(__lowerCAmelCase )
lowerCamelCase__ : Any ={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
conv_attn_to_linear(__lowerCAmelCase )
for i in range(__lowerCAmelCase ):
lowerCamelCase__ : Any =num_up_blocks - 1 - i
lowerCamelCase__ : str =[
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
lowerCamelCase__ : List[Any] =vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
lowerCamelCase__ : int =vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
lowerCamelCase__ : Any =renew_vae_resnet_paths(__lowerCAmelCase )
lowerCamelCase__ : List[Any] ={'old': f"""up.{block_id}.block""", 'new': f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
lowerCamelCase__ : Dict =[key for key in vae_state_dict if 'decoder.mid.block' in key]
lowerCamelCase__ : Union[str, Any] =2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCamelCase__ : Optional[Any] =[key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
lowerCamelCase__ : Dict =renew_vae_resnet_paths(__lowerCAmelCase )
lowerCamelCase__ : List[str] ={'old': f"""mid.block_{i}""", 'new': f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
lowerCamelCase__ : List[Any] =[key for key in vae_state_dict if 'decoder.mid.attn' in key]
lowerCamelCase__ : Union[str, Any] =renew_vae_attention_paths(__lowerCAmelCase )
lowerCamelCase__ : List[str] ={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , additional_replacements=[meta_path] , config=__lowerCAmelCase )
conv_attn_to_linear(__lowerCAmelCase )
return new_checkpoint
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str , ) ->List[str]:
# Only support V1
lowerCamelCase__ : int =requests.get(
' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
lowerCamelCase__ : Optional[Any] =io.BytesIO(r.content )
lowerCamelCase__ : Optional[int] =OmegaConf.load(__lowerCAmelCase )
lowerCamelCase__ : Any =5_1_2
lowerCamelCase__ : Optional[int] ='cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
lowerCamelCase__ : Tuple ={}
with safe_open(__lowerCAmelCase , framework='pt' , device='cpu' ) as f:
for key in f.keys():
lowerCamelCase__ : Any =f.get_tensor(__lowerCAmelCase )
else:
lowerCamelCase__ : List[str] =torch.load(__lowerCAmelCase , map_location=__lowerCAmelCase )['state_dict']
# Convert the VAE model.
lowerCamelCase__ : Any =create_vae_diffusers_config(__lowerCAmelCase , image_size=__lowerCAmelCase )
lowerCamelCase__ : str =custom_convert_ldm_vae_checkpoint(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ : Dict =AutoencoderKL(**__lowerCAmelCase )
vae.load_state_dict(__lowerCAmelCase )
vae.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""")
lowerCAmelCase = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path) | 126 |
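# Example invocation (script name and paths are hypothetical):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae-ft-mse-840000-ema-pruned.ckpt \
#       --dump_path ./converted-vae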
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
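# Shape of the response (per the public zenquotes.io API docs at the time of
# writing - treat the exact keys as an assumption): a JSON list of quote objects,
# e.g. [{"q": "<quote text>", "a": "<author>", "h": "<pre-rendered HTML>"}]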
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Mobius function: 1 for square-free numbers with an even number of prime
    factors, -1 for square-free numbers with an odd number, 0 otherwise."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
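# Worked examples (assuming prime_factors returns factors with multiplicity, as in
# this repo's maths module): 6 = 2 * 3 is square-free with an even count of prime
# factors, 7 has an odd count, and 12 = 2^2 * 3 is not square-free:
#
#   assert mobius(6) == 1
#   assert mobius(7) == -1
#   assert mobius(12) == 0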
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.

    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    """
    Return the original command line string that can be replayed nicely and wrapped for 80 char width.

    Args:
        max_width (`int`, *optional*, defaults to 80):
            The width to wrap for.
        full_python_path (`bool`, *optional*, defaults to `False`):
            Whether to replicate the full path or just the last part (i.e. `python`).
    """

    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ) -> Union[str, Any]:
if not qid_list:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> int:
for k in new_eval:
__lowerCamelCase = new_eval[k]
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
plt.step(__lowerCAmelCase , __lowerCAmelCase , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None ) -> int:
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
__lowerCamelCase = 0.0
__lowerCamelCase = 1.0
__lowerCamelCase = 0.0
__lowerCamelCase = [1.0]
__lowerCamelCase = [0.0]
__lowerCamelCase = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase = true_pos / float(i + 1 )
__lowerCamelCase = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> List[Any]:
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
__lowerCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowerCamelCase = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_exact''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_f1''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_oracle''' )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Optional[Any]:
if not qid_list:
return
__lowerCamelCase = [na_probs[k] for k in qid_list]
__lowerCamelCase = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase = num_no_ans
__lowerCamelCase = cur_score
__lowerCamelCase = 0.0
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase = scores[qid]
else:
if preds[qid]:
__lowerCamelCase = -1
else:
__lowerCamelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase = cur_score
__lowerCamelCase = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = best_exact
__lowerCamelCase = exact_thresh
__lowerCamelCase = best_fa
__lowerCamelCase = fa_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 270 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")
    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)
    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
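# Worked example for the checkpoint fixup above (hypothetical key, not from a real
# checkpoint): torch.compile prefixes saved parameters with "_orig_mod.", so
#   k = "_orig_mod.h.0.ln_1.weight"
#   k[len("_orig_mod."):]            -> "h.0.ln_1.weight"
# and the new_layer_name_dict substitutions ("h." -> "layers.", "ln_1" ->
# "layernorm_1") then yield "layers.0.layernorm_1.weight", the HF parameter name.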
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]
        output_new_model_total = model(vec)
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )
    bark = BarkModel(bark_config)
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 306 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
    from run_translation import main  # noqa


set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )

        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()
        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
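# Illustrative expansion of the distributed branch above (hypothetical values):
# with n_gpus_to_use = 2 the assembled command is roughly
#   python -m torch.distributed.run --nproc_per_node=2 --master_port=<port> \
#       <examples_dir>/pytorch/translation/run_translation.py <args...>
# i.e. each GPU runs one replica of the translation script; the non-distributed
# branch instead patches sys.argv and calls run_translation's main() in-process.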
| 270 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
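# Worked example for rename_key above (hypothetical key): the SimMIM name
# "encoder.layers.0.blocks.0.attn.proj.weight" matches the "attn.proj" rule and
# the final "swin." prefix rule, giving
# "swin.encoder.layers.0.blocks.0.attention.output.dense.weight".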
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
                    :dim
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
                    -dim:
                ]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
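# Self-contained check of the fused-QKV split above (made-up sizes, not tied to a
# real checkpoint): SimMIM stores query/key/value as one (3*dim, dim) matrix, which
# the converter slices into three (dim, dim) projections.
#
#   import torch
#   dim = 4
#   qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v]), qkv)  # the slices tile the original rows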
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 223 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)


class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)
    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 270 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
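# Worked values for the warmup lambda above (illustrative): with num_warmup_steps
# = 100 the returned multiplier is 0.0 at step 0, 0.5 at step 50 and 1.0 from step
# 100 on, so the optimizer's base lr ramps up linearly and then stays constant.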
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
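# Worked values for the cosine decay above (illustrative): with the default
# num_cycles = 0.5 the post-warmup multiplier is 0.5 * (1 + cos(pi * progress)),
# i.e. 1.0 at progress 0.0, 0.5 at progress 0.5 and 0.0 at progress 1.0, giving a
# single smooth half-cosine from the base lr down to zero.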
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
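# Worked values for the polynomial decay above (illustrative numbers): with
# lr_init = 1e-3, lr_end = 1e-7, power = 1.0, no warmup and 1000 training steps,
# the multiplier at step 500 is ((1e-3 - 1e-7) * 0.5 + 1e-7) / 1e-3 ~= 0.5, so the
# lr sits halfway between lr_init and lr_end; past step 1000 it stays at lr_end.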
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
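# Minimal usage sketch for get_scheduler above (hypothetical one-parameter model):
#
#   import torch
#   opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
#   sched = get_scheduler("cosine", opt, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       opt.step()       # update parameters at the current lr
#       sched.step()     # then advance the schedule by one step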
| 237 |
import os
def solution() -> str:
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 270 | 0 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
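# Worked example for compute_f1 above (illustrative strings): a_gold = "the cat sat"
# normalizes to ["cat", "sat"] (articles are stripped) and a_pred = "cat sat down"
# to ["cat", "sat", "down"], so num_same = 2, precision = 2/3, recall = 2/2 = 1.0
# and f1 = 2 * (2/3) * 1.0 / (2/3 + 1.0) = 0.8.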
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
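# Note on the weights trick above: passing weights = 1/N to plt.hist turns raw bin
# counts into dataset proportions, e.g. with 4 samples each bar's height is its
# count * 0.25, so the bar heights across the histogram sum to 1.0.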
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
    dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
| 68 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
def __A ( SCREAMING_SNAKE_CASE__ : List[InputExample] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : List[str]="[CLS]" , SCREAMING_SNAKE_CASE__ : Tuple=1 , SCREAMING_SNAKE_CASE__ : str="[SEP]" , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=0 , SCREAMING_SNAKE_CASE__ : int=0 , SCREAMING_SNAKE_CASE__ : str=-1_00 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0 , SCREAMING_SNAKE_CASE__ : List[Any]=True , ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
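# A hedged illustration of the sub-token labeling scheme implemented above (not
# part of the original module): only the first word-piece of each word keeps the
# real label id; continuation pieces receive pad_token_label_id (-100), which
# CrossEntropyLoss ignores, so every word contributes exactly one prediction.
#
#   tokenizer.tokenize("Washington")   ->  ["Wash", "##ington"]
#   label ids for the word "B-LOC"     ->  [label_map["B-LOC"], -100]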
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        # xlnet has a cls token at the end
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                # xlnet has a cls token at the end
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
| 270 | 0 |
"""simple docstring"""
from collections import defaultdict
class __lowerCamelCase :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
_a = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
_a = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(SCREAMING_SNAKE_CASE__ ) )
]
_a = defaultdict(SCREAMING_SNAKE_CASE__ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
_a = (1 << len(SCREAMING_SNAKE_CASE__ )) - 1
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str:
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
_a = self.count_ways_until(SCREAMING_SNAKE_CASE__ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
_a = total_ways_util
return self.dp[mask][task_no]
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
# Store the list of persons for each task
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
for j in task_performed[i]:
self.task[j].append(SCREAMING_SNAKE_CASE__ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
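# Worked example (a sketch; values not from the original file): with persons
# {0, 1} where person 0 can only do task 1 and person 1 can do tasks 1 and 2,
# count_ways_until(0b00, 1) recurses until mask == final_mask == 0b11. The only
# valid assignment is person 0 -> task 1 and person 1 -> task 2, so the count is 1:
#
#   AssignmentUsingBitmask([[1], [1, 2]], 2).count_no_of_ways([[1], [1, 2]])  # -> 1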
if __name__ == "__main__":
__snake_case = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
__snake_case = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
) | 320 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
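# A minimal usage sketch (hypothetical file path; mirrors the reader API above):
#
#   from datasets.io.text import TextDatasetReader
#
#   dataset = TextDatasetReader("corpus.txt", keep_in_memory=True).read()
#   streamed = TextDatasetReader("corpus.txt", streaming=True).read()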
| 270 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the GLUE MRPC dataset."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
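# Typical invocations (a sketch; the script name is hypothetical, see the
# accelerate examples README linked in the banner above):
#
#   python nlp_example.py                                    # single CPU/GPU, fp32
#   accelerate launch nlp_example.py --mixed_precision fp16  # distributed / mixed precision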
| 305 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 270 | 0 |
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process using shortest job first."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Calculate the turnaround time of each process: burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 346 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ChrF(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"],
            reference_urls=[
                "https://github.com/m-popovic/chrF",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)

        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
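# A minimal usage sketch, mirroring the docstring examples above:
#
#   chrf = datasets.load_metric("chrf")
#   chrf.compute(predictions=["the cat sat"], references=[["the cat sat"]])
#   # identical hypothesis and reference yield a perfect score of 100.0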
| 270 | 0 |
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(
    input_str: str = "",
) -> bool:
    """Check if a string can be rearranged as a palindrome using a Counter."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Check if a string can be rearranged as a palindrome with a manual frequency dict."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    # A string can be rearranged as a palindrome if at most one character has an odd count.
    odd_char = 0

    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark code comparing the two implementations."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 184 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
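# With the lazy module installed in sys.modules, importing a symbol from this
# package only pulls in its heavy backend on first access (a sketch of the
# intended behavior, not additional module code):
#
#   from transformers import ResNetConfig   # cheap: configuration module only
#   from transformers import ResNetModel    # imports modeling_resnet (torch) on demand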
| 270 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
) | 126 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (the extra alpha channel is dropped, hence num_channels - 1)
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (1, max_patch, expected_hidden_dim),
            )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 270 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
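# A hedged usage sketch (checkpoint name taken from the pretrained maps above;
# requires network access and the tokenizer files to be available):
#
#   tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#   tokenizer.build_inputs_with_special_tokens([5, 6])   # -> [CLS] 5 6 [SEP] ids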
| 238 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : int ) -> Optional[int]:
__lowerCamelCase = []
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
self.events.append('''on_init_end''' )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
self.events.append('''on_train_begin''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
self.events.append('''on_train_end''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
self.events.append('''on_epoch_begin''' )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
self.events.append('''on_epoch_end''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.events.append('''on_step_begin''' )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
self.events.append('''on_step_end''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
self.events.append('''on_evaluate''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> str:
self.events.append('''on_predict''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
self.events.append('''on_save''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
self.events.append('''on_log''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
self.events.append('''on_prediction_step''' )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = tempfile.mkdtemp()
def __A ( self : int ) -> List[str]:
shutil.rmtree(self.output_dir )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : List[str]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionModelConfig(a=SCREAMING_SNAKE_CASE__ , b=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE__ , report_to=[] , **SCREAMING_SNAKE_CASE__ )
return Trainer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , callbacks=SCREAMING_SNAKE_CASE__ , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
# Order doesn't matter
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
for cba, cba in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , cba.__class__ )
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
__lowerCamelCase = ['''on_init_end''', '''on_train_begin''']
__lowerCamelCase = 0
__lowerCamelCase = len(trainer.get_eval_dataloader() )
__lowerCamelCase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(SCREAMING_SNAKE_CASE__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# Callbacks passed at init are added to the default callbacks
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowerCamelCase = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> str:
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowerCamelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# We can also add, pop, or remove by instance
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> Any:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# Independent log/save/eval
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# A bit of everything
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE__ ) in warn_mock.call_args[0][0]
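# A minimal sketch of an event-recording callback like the MyTestTrainerCallback the
# tests above rely on (its definition is outside this excerpt, so the names here are
# assumptions): each hook appends its own event name to `self.events`, which is what
# gets compared against `get_expected_events`. The hook signatures are the real
# `TrainerCallback` API.
from transformers import TrainerCallback
class EventRecordingCallback(TrainerCallback):
    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")
    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")
    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")
    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")
    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")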
| 270 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_encodec": [
"ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EncodecConfig",
],
"feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
"EncodecModel",
"EncodecPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
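# A minimal standalone sketch of the lazy-module pattern registered above
# (illustrative only; transformers' real `_LazyModule` is more involved): exported
# names resolve to their defining submodule on first attribute access, so importing
# the package itself stays cheap until e.g. the torch-backed model class is touched.
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)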
| 187 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int) -> None:
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError('''Destination width/height should be > 0''')
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )
    def process(self) -> None:
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]
    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)
    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
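# Quick self-check (added for illustration): nearest-neighbour resizing only remaps
# coordinates, so a solid-colour image must come out unchanged at the new size.
if __name__ == "__main__":
    solid = np.full((30, 20, 3), 127, dtype=np.uint8)
    resizer = NearestNeighbour(solid, 40, 60)
    resizer.process()
    assert resizer.output.shape == (60, 40, 3)
    assert (resizer.output == 127).all()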
| 270 | 0 |
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place with binary insertion sort and return it."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(binary_insertion_sort(unsorted))
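# Worked example (added for illustration): each element is placed with a binary
# search over the already-sorted prefix, so comparisons drop to O(log i) per step,
# though the shifting loop keeps the overall worst case at O(n^2).
if __name__ == "__main__":
    assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
    assert binary_insertion_sort([]) == []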
| 306 |
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source))
    queue_backward.put((0, destination))
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)
        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward,
            cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance,
        )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward,
            cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance,
        )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
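# Example run (added for illustration, using the graphs defined above): the two
# searches meet in the middle; E -> G -> F costs 3, beating E -> B -> C -> D -> F (4).
if __name__ == "__main__":
    assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3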
| 270 | 0 |
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowerCAmelCase : Dict =get_logger(__name__)
lowerCAmelCase : List[str] =r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class a_ :
@add_start_docstrings(SCREAMING_SNAKE_CASE__ )
def __call__( self : int , lowercase : jnp.ndarray , lowercase : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class a_ :
@add_start_docstrings(SCREAMING_SNAKE_CASE__ )
def __call__( self : Optional[Any] , lowercase : jnp.ndarray , lowercase : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class a_ ( __lowercase ):
@add_start_docstrings(SCREAMING_SNAKE_CASE__ )
def __call__( self : Union[str, Any] , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int , **lowercase : Union[str, Any] ):
"""simple docstring"""
for processor in self:
lowercase_ :Optional[int] = inspect.signature(processor.__call__ ).parameters
if len(SCREAMING_SNAKE_CASE__ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
F'Make sure that all the required parameters: {list(function_args.keys() )} for '
F'{processor.__class__} are passed to the logits processor.' )
lowercase_ :int = processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
else:
lowercase_ :str = processor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return scores
class a_ ( __lowercase ):
def __init__( self : int , lowercase : float ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or not (temperature > 0):
raise ValueError(F'`temperature` has to be a strictly positive float, but is {temperature}' )
lowercase_ :int = temperature
def __call__( self : List[str] , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ :Optional[int] = scores / self.temperature
return scores
class a_ ( __lowercase ):
def __init__( self : Tuple , lowercase : float , lowercase : float = -float("Inf" ) , lowercase : int = 1 ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(F'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or (min_tokens_to_keep < 1):
raise ValueError(F'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
lowercase_ :Optional[int] = top_p
lowercase_ :List[Any] = filter_value
lowercase_ :Union[str, Any] = min_tokens_to_keep
def __call__( self : Tuple , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ , lowercase_ :Optional[Any] = lax.top_k(SCREAMING_SNAKE_CASE__ , scores.shape[-1] )
lowercase_ :List[Any] = jnp.full_like(SCREAMING_SNAKE_CASE__ , self.filter_value )
lowercase_ :List[str] = jax.nn.softmax(SCREAMING_SNAKE_CASE__ , axis=-1 ).cumsum(axis=-1 )
lowercase_ :Any = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
lowercase_ :List[Any] = jnp.roll(SCREAMING_SNAKE_CASE__ , 1 )
score_mask |= score_mask.at[:, 0].set(SCREAMING_SNAKE_CASE__ )
# min tokens to keep
lowercase_ :Any = score_mask.at[:, : self.min_tokens_to_keep].set(SCREAMING_SNAKE_CASE__ )
lowercase_ :Optional[Any] = jnp.where(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase_ :List[Any] = jax.lax.sort_key_val(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )[-1]
return next_scores
class a_ ( __lowercase ):
def __init__( self : Optional[Any] , lowercase : int , lowercase : float = -float("Inf" ) , lowercase : int = 1 ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or top_k <= 0:
raise ValueError(F'`top_k` has to be a strictly positive integer, but is {top_k}' )
lowercase_ :Union[str, Any] = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase_ :List[Any] = filter_value
def __call__( self : Optional[Any] , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ , lowercase_ :Union[str, Any] = scores.shape
lowercase_ :List[Any] = jnp.full(batch_size * vocab_size , self.filter_value )
lowercase_ :Dict = min(self.top_k , scores.shape[-1] ) # Safety check
lowercase_ , lowercase_ :List[str] = lax.top_k(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase_ :Dict = jnp.broadcast_to((jnp.arange(SCREAMING_SNAKE_CASE__ ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowercase_ :List[Any] = topk_scores.flatten()
lowercase_ :Tuple = topk_indices.flatten() + shift
lowercase_ :str = next_scores_flat.at[topk_indices_flat].set(SCREAMING_SNAKE_CASE__ )
lowercase_ :Any = next_scores_flat.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return next_scores
class a_ ( __lowercase ):
def __init__( self : Any , lowercase : int ):
"""simple docstring"""
lowercase_ :Optional[Any] = bos_token_id
def __call__( self : Dict , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ :Optional[Any] = jnp.full(scores.shape , -float("inf" ) )
lowercase_ :Any = 1 - jnp.bool_(cur_len - 1 )
lowercase_ :Any = jnp.where(SCREAMING_SNAKE_CASE__ , new_scores.at[:, self.bos_token_id].set(0 ) , SCREAMING_SNAKE_CASE__ )
return scores
class a_ ( __lowercase ):
def __init__( self : Optional[Any] , lowercase : int , lowercase : int ):
"""simple docstring"""
lowercase_ :List[str] = max_length
lowercase_ :Any = eos_token_id
def __call__( self : List[str] , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ :List[Any] = jnp.full(scores.shape , -float("inf" ) )
lowercase_ :Any = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowercase_ :str = jnp.where(SCREAMING_SNAKE_CASE__ , new_scores.at[:, self.eos_token_id].set(0 ) , SCREAMING_SNAKE_CASE__ )
return scores
class a_ ( __lowercase ):
def __init__( self : str , lowercase : int , lowercase : int ):
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or min_length < 0:
raise ValueError(F'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or eos_token_id < 0:
raise ValueError(F'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
lowercase_ :Optional[int] = min_length
lowercase_ :List[Any] = eos_token_id
def __call__( self : int , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ :Tuple = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowercase_ :Optional[int] = jnp.where(SCREAMING_SNAKE_CASE__ , scores.at[:, self.eos_token_id].set(-float("inf" ) ) , SCREAMING_SNAKE_CASE__ )
return scores
class a_ ( __lowercase ):
def __init__( self : Any , lowercase : List[str] , lowercase : List[str] ):
"""simple docstring"""
lowercase_ :Optional[int] = list(SCREAMING_SNAKE_CASE__ )
lowercase_ :int = begin_index
def __call__( self : Optional[Any] , lowercase : str , lowercase : int , lowercase : int ):
"""simple docstring"""
lowercase_ :str = 1 - jnp.bool_(cur_len - self.begin_index )
lowercase_ :Optional[Any] = jnp.where(SCREAMING_SNAKE_CASE__ , scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ) , SCREAMING_SNAKE_CASE__ )
return scores
class a_ ( __lowercase ):
def __init__( self : Any , lowercase : list ):
"""simple docstring"""
lowercase_ :Any = list(SCREAMING_SNAKE_CASE__ )
def __call__( self : Dict , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
lowercase_ :Tuple = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class a_ ( __lowercase ):
def __init__( self : Tuple , lowercase : Tuple ):
"""simple docstring"""
lowercase_ :Tuple = dict(SCREAMING_SNAKE_CASE__ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        lowercase_ :Optional[Any] = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
lowercase_ :Optional[Any] = force_token_array.at[index].set(SCREAMING_SNAKE_CASE__ )
        lowercase_ :Tuple = jnp.int32(SCREAMING_SNAKE_CASE__ )
def __call__( self : int , lowercase : jnp.ndarray , lowercase : jnp.ndarray , lowercase : int ):
"""simple docstring"""
def _force_token(lowercase : Optional[int] ):
lowercase_ :Optional[int] = scores.shape[0]
lowercase_ :List[Any] = self.force_token_array[generation_idx]
lowercase_ :str = jnp.ones_like(SCREAMING_SNAKE_CASE__ , dtype=scores.dtype ) * -float("inf" )
lowercase_ :List[str] = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
lowercase_ :Any = lax.dynamic_update_slice(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , (0, current_token) )
return new_scores
lowercase_ :str = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(SCREAMING_SNAKE_CASE__ ) , lambda: scores , ) , )
return scores
class a_ ( __lowercase ):
def __init__( self : Union[str, Any] , lowercase : Optional[Any] , lowercase : int , lowercase : int ):
"""simple docstring"""
lowercase_ :Optional[Any] = generate_config.eos_token_id
lowercase_ :int = generate_config.no_timestamps_token_id
lowercase_ :List[str] = generate_config.no_timestamps_token_id + 1
lowercase_ :List[Any] = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(SCREAMING_SNAKE_CASE__ , "max_initial_timestamp_index" ):
lowercase_ :Optional[int] = generate_config.max_initial_timestamp_index
else:
lowercase_ :Optional[int] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowercase_ :str = model_config.vocab_size
def __call__( self : List[str] , lowercase : List[str] , lowercase : List[Any] , lowercase : List[Any] ):
"""simple docstring"""
lowercase_ :Dict = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(lowercase : Union[str, Any] , lowercase : Tuple ):
lowercase_ :Tuple = jnp.where((cur_len - self.begin_index) >= 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase_ :Optional[int] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , SCREAMING_SNAKE_CASE__ , )
lowercase_ :Dict = jnp.where((cur_len - self.begin_index) < 2 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase_ :str = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , )
return jnp.where(
SCREAMING_SNAKE_CASE__ , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("inf" ) ) , scores_k.at[: self.eos_token_id].set(-float("inf" ) ) , ) , SCREAMING_SNAKE_CASE__ , )
lowercase_ :List[str] = jax.vmap(SCREAMING_SNAKE_CASE__ )(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase_ :int = jnp.where(cur_len == self.begin_index , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase_ :str = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , SCREAMING_SNAKE_CASE__ , )
lowercase_ :Optional[int] = self.timestamp_begin + self.max_initial_timestamp_index
lowercase_ :Any = jnp.where(
SCREAMING_SNAKE_CASE__ , scores.at[:, last_allowed + 1 :].set(-float("inf" ) ) , SCREAMING_SNAKE_CASE__ , )
# if sum of probability over timestamps is above any other token, sample timestamp
lowercase_ :Optional[Any] = jax.nn.log_softmax(SCREAMING_SNAKE_CASE__ , axis=-1 )
def handle_cumulative_probs(lowercase : Dict , lowercase : str ):
lowercase_ :Optional[int] = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
lowercase_ :str = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("inf" ) ) , SCREAMING_SNAKE_CASE__ , )
lowercase_ :List[Any] = jax.vmap(SCREAMING_SNAKE_CASE__ )(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return scores
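# Usage sketch (added for illustration): the classes above correspond to the Flax
# logits processors in transformers, whose class names are mangled in this listing.
# With the upstream public names, chaining a temperature warp and a top-k filter
# looks like this (assumes jax plus a flax-enabled transformers >= 4.25 install):
if __name__ == "__main__":
    import jax.numpy as jnp
    from transformers.generation import (
        FlaxLogitsProcessorList,
        FlaxTemperatureLogitsWarper,
        FlaxTopKLogitsWarper,
    )
    processors = FlaxLogitsProcessorList(
        [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=5)]
    )
    input_ids = jnp.zeros((1, 4), dtype=jnp.int32)
    scores = jnp.arange(32, dtype=jnp.float32).reshape(1, 32)
    warped = processors(input_ids, scores, cur_len=4)
    # everything outside the 5 largest logits is now -inf
    print(warped)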
| 223 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE__ : List[Any] = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2
def get_child_left_position(position: int) -> int:
    return (2 * position) + 1
def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0
    def __len__(self) -> int:
        return self.elements
    def __repr__(self) -> str:
        return str(self.heap)
    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0
    def __repr__(self) -> str:
        return str(self.connections)
    def __len__(self) -> int:
        return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1
    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
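# Example run (added for illustration): build a small weighted graph and extract the
# spanning-tree structure; `parent` records each node's predecessor in the tree.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "d", 5)
    g.add_edge("a", "c", 15)
    dist, parent = prims_algo(g)
    assert parent["b"] == "a"
    assert parent["d"] == "c"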
| 270 | 0 |
'''simple docstring'''
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__lowerCAmelCase : List[str] ="src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : Dict =direct_transformers_import(PATH_TO_TRANSFORMERS)
__lowerCAmelCase : int =transformers.models.auto.configuration_auto.CONFIG_MAPPING
__lowerCAmelCase : Dict ={
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def UpperCamelCase ( _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple ):
A__ = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F"config.{attribute}" in modeling_source
or F"getattr(config, \"{attribute}\"" in modeling_source
or F"getattr(self.config, \"{attribute}\"" in modeling_source
):
A__ = True
# Deal with multi-line cases
elif (
re.search(
rF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , __lowerCAmelCase , )
is not None
):
A__ = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
A__ = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
A__ = [
"bos_index",
"eos_index",
"pad_index",
"unk_index",
"mask_index",
"image_size",
"use_cache",
"out_features",
"out_indices",
]
A__ = ["encoder_no_repeat_ngram_size"]
# Special cases to be allowed
A__ = True
if not attribute_used:
A__ = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
A__ = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
A__ = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
A__ = True
elif attribute.endswith("_token_id" ):
A__ = True
# configuration class specific cases
if not case_allowed:
A__ = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
A__ = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def UpperCamelCase ( _lowerCamelCase : str ):
A__ = dict(inspect.signature(config_class.__init__ ).parameters )
A__ = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]]
A__ = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
A__ = {}
if len(config_class.attribute_map ) > 0:
A__ = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
A__ = inspect.getsourcefile(__lowerCAmelCase )
A__ = os.path.dirname(__lowerCAmelCase )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
A__ = [os.path.join(__lowerCAmelCase , __lowerCAmelCase ) for fn in os.listdir(__lowerCAmelCase ) if fn.startswith("modeling_" )]
# Get the source code strings
A__ = []
for path in modeling_paths:
if os.path.isfile(__lowerCAmelCase ):
with open(__lowerCAmelCase ) as fp:
modeling_sources.append(fp.read() )
A__ = []
for config_param, default_value in zip(__lowerCAmelCase , __lowerCAmelCase ):
# `attributes` here is all the variant names for `config_param`
A__ = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
unused_attributes.append(attributes[0] )
return sorted(__lowerCAmelCase )
def UpperCamelCase ( ):
A__ = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
A__ = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda _lowerCamelCase : inspect.isclass(__lowerCAmelCase )
and issubclass(__lowerCAmelCase , __lowerCAmelCase )
and inspect.getmodule(__lowerCAmelCase ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
A__ = check_config_attributes_being_used(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
A__ = unused_attributes
if len(__lowerCAmelCase ) > 0:
A__ = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
for name, attributes in configs_with_unused_attributes.items():
error += F"{name}: {attributes}\n"
raise ValueError(__lowerCAmelCase )
if __name__ == "__main__":
check_config_attributes()
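# Standalone illustration of the multi-line `getattr` detection performed above
# (the snippet below is an assumed example, not taken from a real modeling file):
if __name__ == "__main__":
    example_source = 'value = getattr(\n    self.config, "hidden_size", 768\n)'
    multiline_pattern = (
        r"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,"
        r"[ \t\v\n\r\f]*\"hidden_size\""
    )
    assert re.search(multiline_pattern, example_source) is not None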
| 237 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Copy the burst times into remaining_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process = []
    completed = 0
    total_time = 0
    # While processes are not completed:
    # a process whose arrival time has passed and which has remaining execution
    # time is put into ready_process, and the shortest process in ready_process,
    # target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
F'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
F'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(F'\nAverage waiting time = {mean(waiting_time):.5f}')
print(F'Average turnaround time = {mean(turn_around_time):.5f}')
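# Expected output for TEST CASE 01 (worked by hand against the implementation above):
# with all arrivals at 0 the schedule degenerates to shortest-job-first, giving
# waiting times [0, 5, 2, 10], turnaround times [2, 10, 5, 17], an average waiting
# time of 4.25000 and an average turnaround time of 8.50000.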
| 270 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__ ( __lowercase ):
"""simple docstring"""
__lowerCamelCase = ["""image_processor""", """tokenizer"""]
__lowerCamelCase = """ViTImageProcessor"""
__lowerCamelCase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , lowercase=None , lowercase=None , **lowercase ) -> List[str]:
'''simple docstring'''
A__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , SCREAMING_SNAKE_CASE__ , )
A__ = kwargs.pop("feature_extractor" )
A__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __call__( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , **lowercase ) -> Optional[int]:
'''simple docstring'''
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
A__ = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if visual_prompt is not None:
A__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if images is not None:
A__ = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if visual_prompt is not None and images is not None:
A__ = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
A__ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
A__ = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ )
def UpperCamelCase ( self , *lowercase , **lowercase ) -> List[str]:
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def UpperCamelCase ( self , *lowercase , **lowercase ) -> Any:
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , SCREAMING_SNAKE_CASE__ , )
return self.image_processor_class
@property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , SCREAMING_SNAKE_CASE__ , )
return self.image_processor
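# Usage sketch (added for illustration): this class corresponds to transformers'
# CLIPSegProcessor, whose identifiers are mangled in the listing above. With the
# upstream class and the public CIDAS checkpoint, the two call paths look like:
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPSegProcessor
    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.new("RGB", (352, 352))
    # text + images -> tokenizer outputs plus pixel_values
    text_inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
    # visual prompt + images -> pixel_values plus conditional_pixel_values
    prompt_inputs = processor(visual_prompt=[image], images=[image], return_tensors="pt")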
| 68 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase__ ( __lowercase ):
@staticmethod
@abstractmethod
def __A ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> str:
raise NotImplementedError()
@abstractmethod
def __A ( self : Optional[int] ) -> Union[str, Any]:
raise NotImplementedError()
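# A concrete-command sketch (standalone illustration; the ABC above has mangled
# names, so an equivalent one is re-declared here, and the command and flag names
# are invented): `register_subcommand` wires up argparse, `run` does the work.
class BaseCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser) -> None:
        raise NotImplementedError()
    @abstractmethod
    def run(self) -> None:
        raise NotImplementedError()
class HelloCommand(BaseCLICommand):
    @staticmethod
    def register_subcommand(parser) -> None:
        sub = parser.add_parser("hello")
        sub.add_argument("--name", default="world")
        sub.set_defaults(factory=lambda args: HelloCommand(args.name))
    def __init__(self, name: str) -> None:
        self.name = name
    def run(self) -> None:
        print(f"hello, {self.name}")
if __name__ == "__main__":
    cli = ArgumentParser("demo-cli")
    subcommands = cli.add_subparsers()
    HelloCommand.register_subcommand(subcommands)
    args = cli.parse_args(["hello", "--name", "docs"])
    args.factory(args).run()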
| 270 | 0 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__snake_case = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__snake_case = get_tests_dir('''fixtures/vocab.json''')
__snake_case = get_tests_dir('''fixtures''')
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def _UpperCAmelCase ( self ) -> List[Any]:
_a = 0
def _UpperCAmelCase ( self ) -> List[Any]:
_a = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaConfig()
_a = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
_a = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
copyfile(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) )
_a = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaFeatureExtractor()
_a = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
_a = WavaVecaProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
# drop `processor_class` in tokenizer
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''r''' ) as f:
_a = json.load(SCREAMING_SNAKE_CASE__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) )
_a = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( self ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaFeatureExtractor()
_a = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
_a = WavaVecaProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# save in new folder
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
# drop `processor_class` in feature extractor
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''r''' ) as f:
_a = json.load(SCREAMING_SNAKE_CASE__ )
config_dict.pop('''processor_class''' )
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''w''' ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ) )
_a = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(SCREAMING_SNAKE_CASE__ )
# copy relevant files
copyfile(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , '''w''' ) as f:
f.write('''{}''' )
_a = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( self ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
_a = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
_a = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
_a = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
_a = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
_a = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
_a = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ )
_a = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _UpperCAmelCase ( self ) -> Tuple:
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE__ )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
AutoProcessor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
AutoProcessor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
_a = os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_a = CustomTokenizer(SCREAMING_SNAKE_CASE__ )
_a = CustomProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
_a = AutoProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ) -> Dict:
class __lowerCamelCase ( __lowercase ):
'''simple docstring'''
A_ : List[Any] = False
class __lowerCamelCase ( __lowercase ):
'''simple docstring'''
A_ : List[str] = False
class __lowerCamelCase ( __lowercase ):
'''simple docstring'''
A_ : Union[str, Any] = """AutoFeatureExtractor"""
A_ : int = """AutoTokenizer"""
A_ : int = False
try:
AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE__ )
AutoFeatureExtractor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
AutoTokenizer.register(SCREAMING_SNAKE_CASE__ , slow_tokenizer_class=SCREAMING_SNAKE_CASE__ )
AutoProcessor.register(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# If remote code is not set, the default is to use local classes.
_a = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_a = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_a = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCAmelCase ( self ) -> Any:
_a = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def _UpperCAmelCase ( self ) -> Dict:
_a = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
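# Registration pattern exercised by the tests above, in plain form (the Custom*
# classes come from the test fixtures imported at the top of this file):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#     AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#     AutoProcessor.register(CustomConfig, CustomProcessor)
#
# Once registered, any checkpoint whose config resolves to CustomConfig is loaded
# through CustomProcessor by AutoProcessor.from_pretrained.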
@is_staging_test
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def _UpperCAmelCase ( cls ) -> Dict:
_a = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
@classmethod
def _UpperCAmelCase ( cls ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def _UpperCAmelCase ( self ) -> Union[str, Any]:
_a = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE__ , '''test-processor''' ) , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token )
_a = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCAmelCase ( self ) -> Dict:
_a = WavaVecaProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(SCREAMING_SNAKE_CASE__ , '''test-processor-org''' ) , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token , organization='''valid_org''' , )
_a = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(new_processor.feature_extractor , SCREAMING_SNAKE_CASE__ ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCAmelCase ( self ) -> str:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_a = CustomFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
_a = os.path.join(SCREAMING_SNAKE_CASE__ , '''vocab.txt''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_a = CustomTokenizer(SCREAMING_SNAKE_CASE__ )
_a = CustomProcessor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
_a = Repository(SCREAMING_SNAKE_CASE__ , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(SCREAMING_SNAKE_CASE__ , '''tokenizer_config.json''' ) ) as f:
_a = json.load(SCREAMING_SNAKE_CASE__ )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , '''custom_processing.py''' ) ) )
repo.push_to_hub()
_a = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' ) | 320 |
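For reference, the save/load round-trip that the hub tests above exercise can be sketched purely on local disk, without pushing anything. This is a minimal sketch: the checkpoint name is an assumption (any Wav2Vec2 checkpoint that ships a processor config would do) and it needs network access on first run.
import tempfile
from transformers import Wav2Vec2Processor
with tempfile.TemporaryDirectory() as tmp_dir:
    # "facebook/wav2vec2-base-960h" is an illustrative checkpoint choice
    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    processor.save_pretrained(tmp_dir)  # writes preprocessor_config.json, tokenizer files, ...
    reloaded = Wav2Vec2Processor.from_pretrained(tmp_dir)
    assert reloaded.tokenizer.get_vocab() == processor.tokenizer.get_vocab()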
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> List[Any]:
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=SCREAMING_SNAKE_CASE__ , )
assert hasattr(self , '''env''' )
def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[Any]:
# configuration for running training on smdistributed Model Parallel
__lowerCamelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
__lowerCamelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
__lowerCamelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
__lowerCamelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=SCREAMING_SNAKE_CASE__ , instance_type=self.instance_type , debugger_hook_config=SCREAMING_SNAKE_CASE__ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
} , metric_definitions=self.env.metric_definitions , distribution=SCREAMING_SNAKE_CASE__ , py_version='''py36''' , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[str]:
TrainingJobAnalytics(SCREAMING_SNAKE_CASE__ ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[int]:
# create estimator
__lowerCamelCase = self.create_estimator(SCREAMING_SNAKE_CASE__ )
# run training
estimator.fit()
# result dataframe
__lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from the SageMaker job; this includes starting, preprocessing and stopping
__lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump test results into a json file to share in the PR
with open(f'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , SCREAMING_SNAKE_CASE__ )
| 270 | 0 |
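The distribution payload assembled in the estimator-creation method above is plain nested dictionaries; here is a standalone sketch with the same values (they mirror the test and are not tuned recommendations).
mpi_options = {"enabled": True, "processes_per_host": 8}
smp_options = {
    "enabled": True,
    "parameters": {
        "microbatches": 4,
        "placement_strategy": "spread",
        "pipeline": "interleaved",
        "optimize": "speed",
        "partitions": 4,
        "ddp": True,
    },
}
# this is the dict passed as `distribution=` to the HuggingFace estimator
distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
print(distribution["smdistributed"]["modelparallel"]["parameters"]["partitions"])  # 4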
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
A : Optional[int] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(4_2)
A : Any = "sshleifer/student_marian_en_ro_6_1"
A : Tuple = "sshleifer/tiny-mbart"
@require_torch
class A ( __lowercase ):
'''simple docstring'''
def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[int]=True , ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=SCREAMING_SNAKE_CASE__ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , predict_with_generate=SCREAMING_SNAKE_CASE__ , do_train=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , )
lowercase__ = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , """trainer_state.json""" ) ).log_history
if not do_eval:
return
lowercase__ = [log for log in logs if """eval_loss""" in log.keys()]
lowercase__ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
lowercase__ = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , SCREAMING_SNAKE_CASE__ )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCamelCase__ (self : Optional[int] ) -> int:
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCamelCase__ (self : int ) -> List[str]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@require_torch_multi_gpu
def lowerCamelCase__ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase__ (self : Dict ) -> Tuple:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase__ (self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase__ (self : Tuple ) -> Any:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def lowerCamelCase__ (self : Dict ) -> Tuple:
"""simple docstring"""
self.run_seqaseq_quick(
distributed=SCREAMING_SNAKE_CASE__ , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=SCREAMING_SNAKE_CASE__ )
@require_apex
@require_torch_gpu
def lowerCamelCase__ (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test a 2nd time - this was previously producing eval_loss: nan
# to reproduce the problem, set distributed=False
self.run_seqaseq_quick(distributed=SCREAMING_SNAKE_CASE__ , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def lowerCamelCase__ (self : Any , _UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
lowercase__ = experiments[experiment_id]
lowercase__ = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
lowercase__ = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**SCREAMING_SNAKE_CASE__ , extra_args_str=data["""extra_args_str"""] )
lowercase__ = len(re.findall(SCREAMING_SNAKE_CASE__ , cl.err ) )
self.assertEqual(SCREAMING_SNAKE_CASE__ , data["""n_matches"""] )
@slow
def lowerCamelCase__ (self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3E-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE__ , )
# Check metrics
lowercase__ = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , """trainer_state.json""" ) ).log_history
lowercase__ = [log for log in logs if """eval_loss""" in log.keys()]
lowercase__ = eval_metrics[0]
lowercase__ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , SCREAMING_SNAKE_CASE__ )
# test if do_predict saves generations and metrics
lowercase__ = os.listdir(SCREAMING_SNAKE_CASE__ )
lowercase__ = {os.path.basename(SCREAMING_SNAKE_CASE__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCamelCase__ (self : Optional[int] ) -> str:
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_UpperCAmelCase : str ) -> Tuple[int, float]:
lowercase__ = """--skip_memory_metrics 0"""
lowercase__ = self.run_trainer(
max_len=128 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3E-4 , num_train_epochs=1 , optim=SCREAMING_SNAKE_CASE__ , distributed=SCREAMING_SNAKE_CASE__ , extra_args_str=SCREAMING_SNAKE_CASE__ , do_eval=SCREAMING_SNAKE_CASE__ , do_predict=SCREAMING_SNAKE_CASE__ , n_gpus_to_use=1 , )
# Check metrics
lowercase__ = TrainerState.load_from_json(Path(SCREAMING_SNAKE_CASE__ , """trainer_state.json""" ) ).log_history
lowercase__ = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
lowercase__ = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
lowercase__ = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
lowercase__ , lowercase__ , lowercase__ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
lowercase__ , lowercase__ , lowercase__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
lowercase__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
lowercase__ = gpu_peak_mem_orig + gpu_alloc_mem_orig
lowercase__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
lowercase__ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are in `nn.Embedding`, which
# doesn't get quantized and remains in fp32. Therefore only 25M parameters are quantized
# into 2 bytes each, and the diff in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb    25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB of total memory saved.
#
# Peak memory should be the same - the total should differ by about that same margin
#
# After leaving a small margin to accommodate differences between gpus, let's check
# that we have at least 120MB in savings
lowercase__ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
self.assertGreater(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
self.assertEqual(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def lowerCamelCase__ (self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : float = 3E-3 , _UpperCAmelCase : str = "adafactor" , _UpperCAmelCase : bool = False , _UpperCAmelCase : str = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = True , _UpperCAmelCase : int = None , ) -> List[Any]:
"""simple docstring"""
lowercase__ = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
lowercase__ = self.get_auto_remove_tmp_dir()
lowercase__ = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(SCREAMING_SNAKE_CASE__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
lowercase__ = f'''
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(SCREAMING_SNAKE_CASE__ )}
'''.split()
lowercase__ = """
--do_predict
""".split()
lowercase__ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
lowercase__ = get_gpu_count()
lowercase__ = get_torch_dist_unique_port()
lowercase__ = f'''
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
'''.split()
lowercase__ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=self.get_env() )
else:
lowercase__ = ["""run_translation.py"""] + args
with patch.object(SCREAMING_SNAKE_CASE__ , """argv""" , SCREAMING_SNAKE_CASE__ ):
main()
return output_dir
| 305 |
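The run_trainer helper above builds its command line by splitting an f-string on whitespace and appending optional argument groups; a minimal standalone sketch of that pattern (paths and values are placeholders):
model_name = "sshleifer/student_marian_en_ro_6_1"
output_dir = "/tmp/seq2seq-test"  # hypothetical
args = f"""
    --model_name_or_path {model_name}
    --output_dir {output_dir}
    --do_train
    --num_train_epochs 1
""".split()
if True:  # mirrors the `if do_eval:` branch above
    args += "--do_eval --per_device_eval_batch_size 4".split()
print(args[0], args[1])  # --model_name_or_path sshleifer/student_marian_en_ro_6_1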
from __future__ import annotations
from fractions import Fraction
def __magic_name__ ( __lowerCAmelCase : int , __lowerCAmelCase : int ) -> bool:
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def __magic_name__ ( __lowerCAmelCase : int ) -> list[str]:
__lowerCamelCase = []
__lowerCamelCase = 11
__lowerCamelCase = int('''1''' + '''0''' * digit_len )
for num in range(__lowerCAmelCase , __lowerCAmelCase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(__lowerCAmelCase , __lowerCAmelCase ):
solutions.append(f'''{num}/{den}''' )
den += 1
num += 1
__lowerCamelCase = 10
return solutions
def __magic_name__ ( __lowerCAmelCase : int = 2 ) -> int:
__lowerCamelCase = 1.0
for fraction in fraction_list(__lowerCAmelCase ):
__lowerCamelCase = Fraction(__lowerCAmelCase )
result *= frac.denominator / frac.numerator
return int(__lowerCAmelCase )
if __name__ == "__main__":
print(solution())
| 270 | 0 |
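A de-obfuscated sketch of the same Project Euler 33 computation with readable names; it should print 100, the denominator of the product of the four non-trivial digit-cancelling fractions 16/64, 19/95, 26/65 and 49/98.
from fractions import Fraction

def is_digit_cancelling(num: int, den: int) -> bool:
    # e.g. 49/98 == 4/8 after "cancelling" the shared digit 9
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den

product = Fraction(1)
for den in range(10, 100):
    for num in range(10, den):
        if num % 10 == den // 10 and den % 10 != 0 and is_digit_cancelling(num, den):
            product *= Fraction(num, den)
print(product.denominator)  # 100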
'''simple docstring'''
import functools
from typing import Any
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : list[str] ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or len(__lowerCAmelCase ) == 0:
raise ValueError("""the string should be not empty string""" )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(
isinstance(__lowerCAmelCase , __lowerCAmelCase ) and len(__lowerCAmelCase ) > 0 for item in words ):
raise ValueError("""the words should be a list of non-empty strings""" )
# Build trie
UpperCAmelCase__ = {}
UpperCAmelCase__ = """WORD_KEEPER"""
for word in words:
UpperCAmelCase__ = trie
for c in word:
if c not in trie_node:
UpperCAmelCase__ = {}
UpperCAmelCase__ = trie_node[c]
UpperCAmelCase__ = True
UpperCAmelCase__ = len(__lowerCAmelCase )
# Dynamic programming method
@functools.cache
def is_breakable(SCREAMING_SNAKE_CASE__ : int ) -> bool:
if index == len_string:
return True
UpperCAmelCase__ = trie
for i in range(__lowerCAmelCase , __lowerCAmelCase ):
UpperCAmelCase__ = trie_node.get(string[i] , __lowerCAmelCase )
if trie_node is None:
return False
if trie_node.get(__lowerCAmelCase , __lowerCAmelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 346 |
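A readable sketch of the same trie plus memoized-recursion word-break check, with descriptive names in place of the obfuscated ones above:
import functools

WORD_KEEPER = "WORD_KEEPER"

def word_break(string: str, words: list[str]) -> bool:
    trie: dict = {}
    for word in words:
        node = trie
        for ch in word:
            node = node.setdefault(ch, {})
        node[WORD_KEEPER] = True  # marks that a word ends here

    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len(string):
            return True
        node = trie
        for i in range(index, len(string)):
            node = node.get(string[i])
            if node is None:
                return False
            if node.get(WORD_KEEPER) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)

print(word_break("applepenapple", ["apple", "pen"]))  # True
print(word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]))  # False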
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : List[str] = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : str = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Any = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 0 |
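A minimal sketch of the lazy-import pattern used in the module above, written for a hypothetical `my_pkg/__init__.py`; `FooModel` and `modeling_foo` are made-up names, and `_LazyModule` is transformers' internal helper that defers the heavy imports until an attribute is first accessed.
import sys
from typing import TYPE_CHECKING
from transformers.utils import _LazyModule

_import_structure = {"modeling_foo": ["FooModel"]}  # hypothetical submodule and class

if TYPE_CHECKING:
    # type checkers see the real import; at runtime nothing heavy is loaded yet
    from .modeling_foo import FooModel  # noqa: F401
else:
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)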
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
A : Union[str, Any] = data_utils.TransfoXLTokenizer
A : str = data_utils.TransfoXLCorpus
A : List[Any] = data_utils
A : Any = data_utils
def lowercase_ ( _A : int , _A : str , _A : int , _A : Optional[int] ):
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(__lowerCAmelCase , "rb" ) as fp:
lowerCamelCase__ : List[Any] = pickle.load(__lowerCAmelCase , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
lowerCamelCase__ : List[str] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(F"Save vocabulary to {pytorch_vocab_dump_path}" )
lowerCamelCase__ : str = corpus.vocab.__dict__
torch.save(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ : Optional[Any] = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , __lowerCAmelCase )
lowerCamelCase__ : str = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(F"Save dataset to {pytorch_dataset_dump_path}" )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
lowerCamelCase__ : List[Any] = os.path.abspath(__lowerCAmelCase )
lowerCamelCase__ : List[Any] = os.path.abspath(__lowerCAmelCase )
print(F"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
# Initialise PyTorch model
if transfo_xl_config_file == "":
lowerCamelCase__ : Optional[int] = TransfoXLConfig()
else:
lowerCamelCase__ : str = TransfoXLConfig.from_json_file(__lowerCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
lowerCamelCase__ : int = TransfoXLLMHeadModel(__lowerCAmelCase )
lowerCamelCase__ : int = load_tf_weights_in_transfo_xl(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Save pytorch-model
lowerCamelCase__ : Any = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
lowerCamelCase__ : Optional[Any] = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
print(F"Save PyTorch model to {os.path.abspath(__lowerCAmelCase )}" )
torch.save(model.state_dict() , __lowerCAmelCase )
print(F"Save configuration file to {os.path.abspath(__lowerCAmelCase )}" )
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
A : List[Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 184 |
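The encoding="latin1" argument used above is the standard trick for unpickling Python 2 pickles under Python 3, and the module aliasing at the top of the script exists for the same reason: the pickle stores module paths that must resolve at load time. A minimal standalone illustration (the file path is hypothetical):
import pickle

with open("corpus-cached.pkl", "rb") as fp:  # hypothetical Python 2 pickle
    corpus = pickle.load(fp, encoding="latin1")  # decodes py2 str fields as latin-1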
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 270 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class A_ ( __lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = None
class A_ ( __lowercase , __lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 2
@register_to_config
def __init__( self :Optional[Any] , lowerCamelCase_ :float = 0.02 , lowerCamelCase_ :float = 100 , lowerCamelCase_ :float = 1.0_07 , lowerCamelCase_ :float = 80 , lowerCamelCase_ :float = 0.05 , lowerCamelCase_ :float = 50 , ):
"""simple docstring"""
lowerCamelCase__ : List[str] =sigma_max
# setable values
lowerCamelCase__ : Tuple =None
lowerCamelCase__ : Optional[int] =None
lowerCamelCase__ : int =None # sigma(t_i)
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :Optional[int] = None ):
"""simple docstring"""
return sample
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :Union[str, torch.device] = None ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =num_inference_steps
lowerCamelCase__ : List[str] =np.arange(0 , self.num_inference_steps )[::-1].copy()
lowerCamelCase__ : Optional[Any] =torch.from_numpy(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ : str =[
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
lowerCamelCase__ : Union[str, Any] =torch.tensor(SCREAMING_SNAKE_CASE__ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :float , lowerCamelCase_ :Optional[torch.Generator] = None ):
"""simple docstring"""
if self.config.s_min <= sigma <= self.config.s_max:
lowerCamelCase__ : str =min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowerCamelCase__ : Dict =0
# sample eps ~ N(0, S_noise^2 * I)
lowerCamelCase__ : Union[str, Any] =self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE__ ).to(sample.device )
lowerCamelCase__ : Any =sigma + gamma * sigma
lowerCamelCase__ : Tuple =sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def UpperCAmelCase__ ( self :Union[str, Any] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :float , lowerCamelCase_ :float , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = True , ):
"""simple docstring"""
lowerCamelCase__ : str =sample_hat + sigma_hat * model_output
lowerCamelCase__ : Optional[int] =(sample_hat - pred_original_sample) / sigma_hat
lowerCamelCase__ : int =sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE__ , derivative=SCREAMING_SNAKE_CASE__ , pred_original_sample=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :float , lowerCamelCase_ :float , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :torch.FloatTensor , lowerCamelCase_ :bool = True , ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =sample_prev + sigma_prev * model_output
lowerCamelCase__ : Tuple =(sample_prev - pred_original_sample) / sigma_prev
lowerCamelCase__ : int =sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE__ , derivative=SCREAMING_SNAKE_CASE__ , pred_original_sample=SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase__ ( self :Optional[Any] , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] ):
"""simple docstring"""
raise NotImplementedError() | 126 |
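A standalone sketch of the sigma schedule computed in the set_timesteps method above: a geometric interpolation from sigma_max**2 down to sigma_min**2 as the step index grows. The values below are the scheduler's config defaults.
sigma_min, sigma_max = 0.02, 100.0
num_inference_steps = 5
schedule = [
    sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
    for i in range(num_inference_steps)
]
print([round(s, 4) for s in schedule])  # [10000.0, 141.4214, 2.0, 0.0283, 0.0004]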
import pprint
import requests
SCREAMING_SNAKE_CASE__ : str = "https://zenquotes.io/api"
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __magic_name__ ( ) -> list:
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = random_quotes()
pprint.pprint(response)
| 270 | 0 |
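A slightly more defensive variant of the helpers above; the timeout value is an arbitrary choice, and raise_for_status surfaces HTTP errors early instead of returning an error payload.
import requests

def random_quote(timeout: float = 10.0) -> list:
    response = requests.get("https://zenquotes.io/api/random", timeout=timeout)
    response.raise_for_status()  # raise on 4xx/5xx instead of silently parsing
    return response.json()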
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class __SCREAMING_SNAKE_CASE ( __lowercase ):
'''simple docstring'''
warnings.warn(
'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.' , __lowercase , )
| 238 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __magic_name__ ( __lowerCAmelCase : int ) -> int:
__lowerCamelCase = prime_factors(__lowerCAmelCase )
if is_square_free(__lowerCAmelCase ):
return -1 if len(__lowerCAmelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270 | 0 |
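A self-contained sketch of the same Möbius function with readable names; the trial-division helper below stands in for the imported maths.prime_factors.
def prime_factors(n: int) -> list[int]:
    factors, p = [], 2
    while p * p <= n:
        while n % p == 0:
            factors.append(p)
            n //= p
        p += 1
    if n > 1:
        factors.append(n)
    return factors

def mobius(n: int) -> int:
    factors = prime_factors(n)
    if len(set(factors)) == len(factors):  # square-free
        return -1 if len(factors) % 2 else 1
    return 0

print([mobius(n) for n in range(1, 11)])  # [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]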
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase__ : List[Any] = "\\n Text data.\n Second line of data."
lowercase__ : List[str] = "file"
@pytest.fixture(scope="session" )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
snake_case_ = bytes(__lowerCAmelCase , "utf-8" )
with zstd.open(__lowerCAmelCase , "wb" ) as f:
f.write(__lowerCAmelCase )
return path
@pytest.fixture
def lowerCamelCase__ ( _A ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , __lowerCAmelCase ) , "w" ) as f:
f.write(__lowerCAmelCase )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
snake_case_ = input_paths[compression_format]
snake_case_ = tmp_path / "cache"
snake_case_ = DownloadConfig(cache_dir=__lowerCAmelCase , extract_compressed_file=__lowerCAmelCase )
snake_case_ = cached_path(__lowerCAmelCase , download_config=__lowerCAmelCase )
with open(__lowerCAmelCase ) as f:
snake_case_ = f.read()
with open(__lowerCAmelCase ) as f:
snake_case_ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase__ ( _A , _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = "custom_cache"
snake_case_ = "custom_extracted_dir"
snake_case_ = tmp_path / "custom_extracted_path"
if default_extracted:
snake_case_ = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , __lowerCAmelCase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(__lowerCAmelCase ) )
snake_case_ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
snake_case_ = xz_file
snake_case_ = (
DownloadConfig(extract_compressed_file=__lowerCAmelCase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=__lowerCAmelCase )
)
snake_case_ = cached_path(__lowerCAmelCase , download_config=__lowerCAmelCase )
assert Path(__lowerCAmelCase ).parent.parts[-2:] == expected
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = str(Path(__lowerCAmelCase ).resolve() )
assert cached_path(__lowerCAmelCase ) == text_file
# relative path
snake_case_ = str(Path(__lowerCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__lowerCAmelCase ) == text_file
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(__lowerCAmelCase ):
cached_path(__lowerCAmelCase )
# relative path
snake_case_ = "./__missing_file__.txt"
with pytest.raises(__lowerCAmelCase ):
cached_path(__lowerCAmelCase )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = get_from_cache(f"tmp://{tmpfs_file}" )
with open(__lowerCAmelCase ) as f:
snake_case_ = f.read()
assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCAmelCase )
def lowerCamelCase__ ( ):
'''simple docstring'''
with pytest.raises(__lowerCAmelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCAmelCase )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__lowerCAmelCase ):
http_get("https://huggingface.co" , temp_file=__lowerCAmelCase )
with pytest.raises(__lowerCAmelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCAmelCase )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__lowerCAmelCase ):
ftp_get("ftp://huggingface.co" , temp_file=__lowerCAmelCase )
with pytest.raises(__lowerCAmelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , __lowerCAmelCase )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(__lowerCAmelCase ):
fsspec_get("s3://huggingface.co" , temp_file=__lowerCAmelCase )
with pytest.raises(__lowerCAmelCase ):
fsspec_head("s3://huggingface.co" )
| 187 |
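A standalone sketch of the zstd fixture above: write a zstandard-compressed file and read it back. This assumes the `zstandard` package is installed; the temporary path is illustrative.
import os
import tempfile
import zstandard as zstd

payload = b"Text data.\nSecond line of data."
path = os.path.join(tempfile.mkdtemp(), "file.zstd")
with zstd.open(path, "wb") as f:
    f.write(payload)
with zstd.open(path, "rb") as f:
    assert f.read() == payload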
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r"\b(a|an|the)\b", re.UNICODE)
SCREAMING_SNAKE_CASE__ : int = None
def __magic_name__ ( ) -> str:
__lowerCamelCase = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
parser.add_argument(
'''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).''' )
parser.add_argument(
'''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.''' )
parser.add_argument(
'''--na-prob-thresh''' , '''-t''' , type=__lowerCAmelCase , default=1.0 , help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' , )
parser.add_argument(
'''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=__lowerCAmelCase , help='''Save precision-recall curves to directory.''' )
parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Union[str, Any]:
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = bool(qa['''answers''']['''text'''] )
return qid_to_has_ans
def __magic_name__ ( __lowerCAmelCase : Dict ) -> Optional[Any]:
def remove_articles(__lowerCAmelCase : Optional[int] ):
return ARTICLES_REGEX.sub(''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase : Optional[int] ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase : Union[str, Any] ):
__lowerCamelCase = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> Optional[int]:
if not s:
return []
return normalize_answer(__lowerCAmelCase ).split()
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple ) -> int:
return int(normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase ) )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Tuple ) -> str:
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = get_tokens(__lowerCAmelCase )
__lowerCamelCase = collections.Counter(__lowerCAmelCase ) & collections.Counter(__lowerCAmelCase )
__lowerCamelCase = sum(common.values() )
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = 1.0 * num_same / len(__lowerCAmelCase )
__lowerCamelCase = (2 * precision * recall) / (precision + recall)
return fa
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[str] ) -> Optional[Any]:
__lowerCamelCase = {}
__lowerCamelCase = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
__lowerCamelCase = qa['''id''']
__lowerCamelCase = [t for t in qa['''answers''']['''text'''] if normalize_answer(__lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
__lowerCamelCase = ['''''']
if qid not in preds:
print(f'''Missing prediction for {qid}''' )
continue
__lowerCamelCase = preds[qid]
# Take max over all gold answers
__lowerCamelCase = max(compute_exact(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
__lowerCamelCase = max(compute_fa(__lowerCAmelCase , __lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] ) -> List[str]:
__lowerCamelCase = {}
for qid, s in scores.items():
__lowerCamelCase = na_probs[qid] > na_prob_thresh
if pred_na:
__lowerCamelCase = float(not qid_to_has_ans[qid] )
else:
__lowerCamelCase = s
return new_scores
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any]=None ) -> Union[str, Any]:
if not qid_list:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores.values() ) / total),
('''f1''', 100.0 * sum(fa_scores.values() ) / total),
('''total''', total),
] )
else:
__lowerCamelCase = len(__lowerCAmelCase )
return collections.OrderedDict(
[
('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
('''total''', total),
] )
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] ) -> int:
for k in new_eval:
__lowerCamelCase = new_eval[k]
def __magic_name__ ( __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
plt.step(__lowerCAmelCase , __lowerCAmelCase , color='''b''' , alpha=0.2 , where='''post''' )
plt.fill_between(__lowerCAmelCase , __lowerCAmelCase , step='''post''' , alpha=0.2 , color='''b''' )
plt.xlabel('''Recall''' )
plt.ylabel('''Precision''' )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(__lowerCAmelCase )
plt.savefig(__lowerCAmelCase )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=None , __lowerCAmelCase : Tuple=None ) -> int:
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
__lowerCamelCase = 0.0
__lowerCamelCase = 1.0
__lowerCamelCase = 0.0
__lowerCamelCase = [1.0]
__lowerCamelCase = [0.0]
__lowerCamelCase = 0.0
for i, qid in enumerate(__lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
__lowerCamelCase = true_pos / float(i + 1 )
__lowerCamelCase = true_pos / float(__lowerCAmelCase )
if i == len(__lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(__lowerCAmelCase )
recalls.append(__lowerCAmelCase )
if out_image:
plot_pr_curve(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return {"ap": 100.0 * avg_prec}
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : List[Any] ) -> List[Any]:
if out_image_dir and not os.path.exists(__lowerCAmelCase ):
os.makedirs(__lowerCAmelCase )
__lowerCamelCase = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_exact.png''' ) , title='''Precision-Recall curve for Exact Match score''' , )
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_f1.png''' ) , title='''Precision-Recall curve for F1 score''' , )
__lowerCamelCase = {k: float(__lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
__lowerCamelCase = make_precision_recall_eval(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , out_image=os.path.join(__lowerCAmelCase , '''pr_oracle.png''' ) , title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' , )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_exact''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_f1''' )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''pr_oracle''' )
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int ) -> Optional[Any]:
if not qid_list:
return
__lowerCamelCase = [na_probs[k] for k in qid_list]
__lowerCamelCase = np.ones_like(__lowerCAmelCase ) / float(len(__lowerCAmelCase ) )
plt.hist(__lowerCAmelCase , weights=__lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('''Model probability of no-answer''' )
plt.ylabel('''Proportion of dataset''' )
plt.title(f'''Histogram of no-answer probability: {name}''' )
plt.savefig(os.path.join(__lowerCAmelCase , f'''na_prob_hist_{name}.png''' ) )
plt.clf()
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] ) -> Optional[int]:
__lowerCamelCase = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
__lowerCamelCase = num_no_ans
__lowerCamelCase = cur_score
__lowerCamelCase = 0.0
__lowerCamelCase = sorted(__lowerCAmelCase , key=lambda __lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(__lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
__lowerCamelCase = scores[qid]
else:
if preds[qid]:
__lowerCamelCase = -1
else:
__lowerCamelCase = 0
cur_score += diff
if cur_score > best_score:
__lowerCamelCase = cur_score
__lowerCamelCase = na_probs[qid]
return 100.0 * best_score / len(__lowerCAmelCase ), best_thresh
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[Any] ) -> int:
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase , __lowerCamelCase = find_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = best_exact
__lowerCamelCase = exact_thresh
__lowerCamelCase = best_fa
__lowerCamelCase = fa_thresh
def __magic_name__ ( ) -> Optional[int]:
with open(OPTS.data_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
__lowerCamelCase = dataset_json['''data''']
with open(OPTS.pred_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
__lowerCamelCase = json.load(__lowerCAmelCase )
else:
__lowerCamelCase = {k: 0.0 for k in preds}
__lowerCamelCase = make_qid_to_has_ans(__lowerCAmelCase ) # maps qid to True/False
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if v]
__lowerCamelCase = [k for k, v in qid_to_has_ans.items() if not v]
__lowerCamelCase , __lowerCamelCase = get_raw_scores(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = apply_no_ans_threshold(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.na_prob_thresh )
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase )
if has_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''HasAns''' )
if no_ans_qids:
__lowerCamelCase = make_eval_dict(__lowerCAmelCase , __lowerCAmelCase , qid_list=__lowerCAmelCase )
merge_eval(__lowerCAmelCase , __lowerCAmelCase , '''NoAns''' )
if OPTS.na_prob_file:
find_all_best_thresh(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''hasAns''' )
histogram_na_prob(__lowerCAmelCase , __lowerCAmelCase , OPTS.out_image_dir , '''noAns''' )
if OPTS.out_file:
with open(OPTS.out_file , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
else:
print(json.dumps(__lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 270 | 0 |
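A readable standalone version of the token-level F1 computed in the eval script above (the answer normalization step is omitted here for brevity):
import collections

def token_f1(gold: str, pred: str) -> float:
    gold_toks, pred_toks = gold.split(), pred.split()
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # if either is no-answer, F1 is 1 if they agree, 0 otherwise
        return float(gold_toks == pred_toks)
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)

print(token_f1("the cat sat", "the cat"))  # 0.8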
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __UpperCAmelCase (__lowercase ):
__snake_case : Any = ["""image_processor""", """tokenizer"""]
__snake_case : Any = """AutoImageProcessor"""
__snake_case : Union[str, Any] = """AutoTokenizer"""
def __init__( self: Tuple , UpperCAmelCase_: Optional[int]=None , UpperCAmelCase_: Optional[Any]=None , **UpperCAmelCase_: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , SCREAMING_SNAKE_CASE__ , )
_SCREAMING_SNAKE_CASE = kwargs.pop("""feature_extractor""" )
_SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = self.image_processor
_SCREAMING_SNAKE_CASE = False
def __call__( self: Union[str, Any] , *UpperCAmelCase_: Any , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = kwargs.pop("""images""" , SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = kwargs.pop("""text""" , SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
_SCREAMING_SNAKE_CASE = args[0]
_SCREAMING_SNAKE_CASE = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_SCREAMING_SNAKE_CASE = self.image_processor(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is not None:
_SCREAMING_SNAKE_CASE = self.tokenizer(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
_SCREAMING_SNAKE_CASE = encodings["""input_ids"""]
return inputs
def UpperCamelCase ( self: Tuple , *UpperCAmelCase_: Any , **UpperCAmelCase_: Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def UpperCamelCase ( self: Optional[int] , *UpperCAmelCase_: Optional[int] , **UpperCAmelCase_: int ):
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@contextmanager
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = self.tokenizer
yield
_SCREAMING_SNAKE_CASE = self.image_processor
_SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self: Any , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[Any]=False , UpperCAmelCase_: List[Any]=None ):
'''simple docstring'''
if added_vocab is None:
_SCREAMING_SNAKE_CASE = self.tokenizer.get_added_vocab()
_SCREAMING_SNAKE_CASE = {}
while tokens:
_SCREAMING_SNAKE_CASE = re.search(R"""<s_(.*?)>""" , SCREAMING_SNAKE_CASE__ , re.IGNORECASE )
if start_token is None:
break
_SCREAMING_SNAKE_CASE = start_token.group(1 )
_SCREAMING_SNAKE_CASE = re.search(RF'</s_{key}>' , SCREAMING_SNAKE_CASE__ , re.IGNORECASE )
_SCREAMING_SNAKE_CASE = start_token.group()
if end_token is None:
_SCREAMING_SNAKE_CASE = tokens.replace(SCREAMING_SNAKE_CASE__ , """""" )
else:
_SCREAMING_SNAKE_CASE = end_token.group()
_SCREAMING_SNAKE_CASE = re.escape(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = re.escape(SCREAMING_SNAKE_CASE__ )
_SCREAMING_SNAKE_CASE = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , SCREAMING_SNAKE_CASE__ , re.IGNORECASE )
if content is not None:
_SCREAMING_SNAKE_CASE = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_SCREAMING_SNAKE_CASE = self.tokenajson(SCREAMING_SNAKE_CASE__ , is_inner_value=SCREAMING_SNAKE_CASE__ , added_vocab=SCREAMING_SNAKE_CASE__ )
if value:
if len(SCREAMING_SNAKE_CASE__ ) == 1:
_SCREAMING_SNAKE_CASE = value[0]
_SCREAMING_SNAKE_CASE = value
else: # leaf nodes
_SCREAMING_SNAKE_CASE = []
for leaf in content.split(R"""<sep/>""" ):
_SCREAMING_SNAKE_CASE = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_SCREAMING_SNAKE_CASE = leaf[1:-2] # for categorical special tokens
output[key].append(SCREAMING_SNAKE_CASE__ )
if len(output[key] ) == 1:
_SCREAMING_SNAKE_CASE = output[key][0]
_SCREAMING_SNAKE_CASE = tokens[tokens.find(SCREAMING_SNAKE_CASE__ ) + len(SCREAMING_SNAKE_CASE__ ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=SCREAMING_SNAKE_CASE__ , added_vocab=SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , SCREAMING_SNAKE_CASE__ , )
return self.image_processor_class
@property
def UpperCamelCase ( self: Any ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , SCREAMING_SNAKE_CASE__ , )
return self.image_processor
| 306 |
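A toy sketch of the deprecated as_target_processor pattern shown above: a context manager that temporarily swaps which component handles __call__. The class and lambdas here are made up purely for illustration.
from contextlib import contextmanager

class ToyProcessor:
    def __init__(self):
        self.image_processor = lambda x: f"pixels({x})"
        self.tokenizer = lambda x: f"tokens({x})"
        self.current_processor = self.image_processor

    @contextmanager
    def as_target_processor(self):
        # route calls to the tokenizer for the duration of the block
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor

    def __call__(self, x):
        return self.current_processor(x)

proc = ToyProcessor()
print(proc("img.png"))            # pixels(img.png)
with proc.as_target_processor():
    print(proc("hello"))          # tokens(hello)
print(proc("img2.png"))           # pixels(img2.png)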
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
SCREAMING_SNAKE_CASE__ : Optional[int] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
SCREAMING_SNAKE_CASE__ : Any = "sshleifer/student_marian_en_ro_6_1"
SCREAMING_SNAKE_CASE__ : Tuple = "sshleifer/tiny-mbart"
@require_torch
class lowerCAmelCase__ ( __lowercase ):
    def run_seqaseq_quick( self : List[Any] , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ) -> Optional[int]:
        output_dir = self.run_trainer(
            eval_steps=1 , max_len=12 , model_name=MBART_TINY , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
        logs = TrainerState.load_from_json(os.path.join(output_dir , '''trainer_state.json''' ) ).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if '''eval_loss''' in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats['''eval_bleu'''] , float )
            assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def __A ( self : Optional[int] ) -> int:
        self.run_seqaseq_quick()

    @require_torch_multi_gpu
    def __A ( self : int ) -> List[str]:
        self.run_seqaseq_quick(distributed=False )

    @require_torch_multi_gpu
    def __A ( self : Optional[Any] ) -> Tuple:
        self.run_seqaseq_quick(distributed=True )

    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def __A ( self : Dict ) -> Tuple:
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple''' )

    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def __A ( self : Optional[int] ) -> List[str]:
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp simple --fp16''' )

    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def __A ( self : Tuple ) -> Any:
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=False )

    @unittest.skip('''Requires an update of the env running those tests''' )
    @require_torch_multi_gpu
    @require_fairscale
    def __A ( self : Dict ) -> Tuple:
        self.run_seqaseq_quick(
            distributed=True , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=False )

    @require_apex
    @require_torch_gpu
    def __A ( self : Union[str, Any] ) -> List[str]:
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
    def __A ( self : Any , experiment_id : List[str] ) -> Optional[Any]:
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            '''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            '''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            '''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            '''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
        }
        data = experiments[experiment_id]
        kwargs = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
        log_info_string = '''Running training'''
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs , extra_args_str=data['''extra_args_str'''] )
        n_matches = len(re.findall(log_info_string , cl.err ) )
        self.assertEqual(n_matches , data['''n_matches'''] )
@slow
def __A ( self : Any ) -> Optional[Any]:
__lowerCamelCase = self.run_trainer(
eval_steps=2 , max_len=1_28 , model_name=SCREAMING_SNAKE_CASE__ , learning_rate=3e-4 , num_train_epochs=10 , distributed=SCREAMING_SNAKE_CASE__ , )
# Check metrics
__lowerCamelCase = TrainerState.load_from_json(os.path.join(SCREAMING_SNAKE_CASE__ , '''trainer_state.json''' ) ).log_history
__lowerCamelCase = [log for log in logs if '''eval_loss''' in log.keys()]
__lowerCamelCase = eval_metrics[0]
__lowerCamelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , SCREAMING_SNAKE_CASE__ )
# test if do_predict saves generations and metrics
__lowerCamelCase = os.listdir(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {os.path.basename(SCREAMING_SNAKE_CASE__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __A ( self : Optional[int] ) -> str:
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim : str ) -> Tuple[int, float]:
            extra_args = '''--skip_memory_metrics 0'''
            output_dir = self.run_trainer(
                max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir , '''trainer_state.json''' ) ).log_history
            gpu_peak_mem_mb = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
            gpu_alloc_mem_mb = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
            loss = logs[0]['''train_loss''']
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
        gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
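        # Worked example of the estimate above (illustrative arithmetic only, not a
        # measurement; the 25M quantized params come from the comment above):
        #   adamw_fp32_state_mb = 25_000_000 * 8 / 2**20   # ~190 MiB of optimizer state
        #   adamw_bnb_state_mb  = 25_000_000 * 2 / 2**20   # ~48 MiB of optimizer state
        #   expected_saving_mb  = adamw_fp32_state_mb - adamw_bnb_state_mb  # ~143 MiB > 120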
        expected_savings = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff , expected_savings , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
            f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
            f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
        self.assertGreater(
            gpu_total_mem_diff , expected_savings , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
            f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
            f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
        self.assertEqual(
            loss_orig , loss_bnb , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
    def run_trainer( self : Dict , max_len : int , model_name : str , num_train_epochs : int , learning_rate : float = 3e-3 , optim : str = "adafactor" , distributed : bool = False , extra_args_str : str = None , eval_steps : int = 0 , predict_with_generate : bool = True , do_train : bool = True , do_eval : bool = True , do_predict : bool = True , n_gpus_to_use : int = None , ) -> List[Any]:
        data_dir = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f'''
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(SCREAMING_SNAKE_CASE__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
            --save_steps {str(eval_steps )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
'''.split()
        args_eval = f'''
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps )}
        '''.split()
        args_predict = '''
            --do_predict
        '''.split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'''--optim {optim}'''.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f'''
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            '''.split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd , env=self.get_env() )
else:
            testargs = ['''run_translation.py'''] + args
            with patch.object(sys , '''argv''' , testargs ):
main()
return output_dir
| 270 | 0 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
lowerCAmelCase : Dict =logging.getLogger(__name__)
class a_ ( PretrainedConfig ):
    model_type = """masked_bert"""

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 223 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Dict = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class lowerCAmelCase__ ( PretrainedConfig ):
a__ : Any = """mask2former"""
a__ : Dict = ["""swin"""]
a__ : Any = {"""hidden_size""": """hidden_dim"""}
    def __init__( self : Any , backbone_config : Optional[Dict] = None , feature_size : int = 2_56 , mask_feature_size : int = 2_56 , hidden_dim : int = 2_56 , encoder_feedforward_dim : int = 10_24 , activation_function : str = "relu" , encoder_layers : int = 6 , decoder_layers : int = 10 , num_attention_heads : int = 8 , dropout : float = 0.0 , dim_feedforward : int = 20_48 , pre_norm : bool = False , enforce_input_projection : bool = False , common_stride : int = 4 , ignore_value : int = 2_55 , num_queries : int = 1_00 , no_object_weight : float = 0.1 , class_weight : float = 2.0 , mask_weight : float = 5.0 , dice_weight : float = 5.0 , train_num_points : int = 1_25_44 , oversample_ratio : float = 3.0 , importance_sample_ratio : float = 0.75 , init_std : float = 0.02 , init_xavier_std : float = 1.0 , use_auxiliary_loss : bool = True , feature_strides : List[int] = [4, 8, 16, 32] , output_auxiliary_logits : bool = None , **kwargs : Tuple , ) -> str:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' )
__lowerCamelCase = CONFIG_MAPPING['''swin'''](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE__ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
f'''Supported model types: {','.join(self.backbones_supported )}''' )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )

    @classmethod
    def from_backbone_config( cls : Any , backbone_config : PretrainedConfig , **kwargs : Optional[Any] ) -> List[Any]:
        return cls(
            backbone_config=backbone_config , **kwargs , )

    def to_dict( self : Any ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
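    # Hedged usage sketch (upstream this class is Mask2FormerConfig; the local
    # class name above is kept as-is): a default instance builds a Swin backbone
    # config automatically, and to_dict() round-trips it.
    #   config = lowerCAmelCase__()
    #   assert config.to_dict()["model_type"] == "mask2former"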
| 270 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp( self :Optional[Any] )-> int:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(merges ) )
    def get_input_output_texts( self :Optional[int] , tokenizer :Union[str, Any] )-> List[str]:
        return "lower newer", "lower newer"

    def UpperCAmelCase_ ( self :Optional[Any] )-> List[str]:
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
    def UpperCAmelCase_ ( self :Tuple , max_length :Optional[int]=15 )-> Tuple:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
def UpperCAmelCase_ ( self :Any )-> Tuple:
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase ):
pass
| 237 |
import os
def solution( ) -> str:
    file_path = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
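# Minimal illustration of the approach with small stand-in numbers:
#   str(sum(int(line) for line in ["123", "456", "789"]))[:10] == "1368"
# The real num.txt is assumed to hold one large integer per line.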
| 270 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def set_recursively( hf_pointer , key , value , full_name , weight_type ) -> Optional[int]:
    '''simple docstring'''
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights( fairseq_model , hf_model , is_finetuned ) -> List[str]:
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
            if not is_used:
                unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> Optional[Any]:
    '''simple docstring'''
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def convert_config( model , is_finetuned ) -> List[Any]:
    '''simple docstring'''
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ) -> int:
    '''simple docstring'''
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path )
    else:
        config = convert_config(model[0] , is_finetuned )
    model = model[0].eval()
    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=return_attention_mask , return_attention_mask=return_attention_mask , )
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , "vocab.json" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            with open(vocab_path , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(target_dict.indices , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=False , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_model = SEWForCTC(config )
    else:
        hf_model = SEWModel(config )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    recursively_load_weights(model , hf_model , is_finetuned )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
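# Hedged usage sketch (all paths are placeholders, not real files):
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_model.pt \
#       --dict_path ./dict.ltr.txt \
#       --pytorch_dump_folder_path ./sew-hf \
#       --is_finetuned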
| 68 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample :
    guid : str
    words : List[str]
    labels : Optional[List[str]]


@dataclass
class InputFeatures :
    input_ids : List[int]
    attention_mask : List[int]
    token_type_ids : Optional[List[int]] = None
    label_ids : Optional[List[int]] = None
a__ : Optional[List[int]] = None
class Split ( Enum ):
    train = """train"""
    dev = """dev"""
    test = """test"""
class TokenClassificationTask :
    @staticmethod
    def read_examples_from_file( data_dir : Union[str, Any] , mode : Union[Split, str] ) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels( path : str ) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features( examples : List[InputExample] , label_list : List[str] , max_seq_length : int , tokenizer : PreTrainedTokenizer , cls_token_at_end=False , cls_token="[CLS]" , cls_token_segment_id=1 , sep_token="[SEP]" , sep_token_extra=False , pad_on_left=False , pad_token=0 , pad_token_segment_id=0 , pad_token_label_id=-1_00 , sequence_a_segment_id=0 , mask_padding_with_zero=True , ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 1_00_00 == 0:
                logger.info('''Writing example %d of %d''' , ex_index , len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words , example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
            if ex_index < 5:
                logger.info('''*** Example ***''' )
                logger.info('''guid: %s''' , example.guid )
                logger.info('''tokens: %s''' , ''' '''.join([str(x ) for x in tokens] ) )
                logger.info('''input_ids: %s''' , ''' '''.join([str(x ) for x in input_ids] ) )
                logger.info('''input_mask: %s''' , ''' '''.join([str(x ) for x in input_mask] ) )
                logger.info('''segment_ids: %s''' , ''' '''.join([str(x ) for x in segment_ids] ) )
                logger.info('''label_ids: %s''' , ''' '''.join([str(x ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class lowerCAmelCase__ ( Dataset ):
        features : List[InputFeatures]
        pad_token_label_id : int = nn.CrossEntropyLoss().ignore_index

        def __init__( self : List[str] , token_classification_task : TokenClassificationTask , data_dir : str , tokenizer : PreTrainedTokenizer , labels : List[str] , model_type : str , max_seq_length : Optional[int] = None , overwrite_cache=False , mode : Split = Split.train , ) -> Union[str, Any]:
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + '''.lock'''
            with FileLock(lock_path ):
                if os.path.exists(cached_features_file ) and not overwrite_cache:
                    logger.info(f'''Loading features from cached file {cached_features_file}''' )
                    self.features = torch.load(cached_features_file )
                else:
                    logger.info(f'''Creating features from dataset file at {data_dir}''' )
                    examples = token_classification_task.read_examples_from_file(data_dir , mode )
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                    logger.info(f'''Saving features into cached file {cached_features_file}''' )
                    torch.save(self.features , cached_features_file )
def __len__( self : Dict ) -> str:
return len(self.features )
def __getitem__( self : Any , SCREAMING_SNAKE_CASE__ : Dict ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
    class lowerCAmelCase__ :
        features : List[InputFeatures]
        pad_token_label_id : int = -100

        def __init__( self : Optional[Any] , token_classification_task : TokenClassificationTask , data_dir : str , tokenizer : PreTrainedTokenizer , labels : List[str] , model_type : str , max_seq_length : Optional[int] = None , overwrite_cache=False , mode : Split = Split.train , ) -> List[Any]:
            examples = token_classification_task.read_examples_from_file(data_dir , mode )
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples , labels , max_seq_length , tokenizer , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=False , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32}, tf.int64) , (
                        {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
                        tf.TensorShape([None] ),
                    ) , )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen , ({'''input_ids''': tf.int32, '''attention_mask''': tf.int32, '''token_type_ids''': tf.int32}, tf.int64) , (
                        {
                            '''input_ids''': tf.TensorShape([None] ),
                            '''attention_mask''': tf.TensorShape([None] ),
                            '''token_type_ids''': tf.TensorShape([None] ),
                        },
                        tf.TensorShape([None] ),
                    ) , )
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : List[Any] ) -> Any:
return len(self.features )
def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> InputFeatures:
return self.features[i]
| 270 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester ( ConfigTester ):
    '''simple docstring'''

    def create_and_test_config_common_properties( self ) -> Union[str, Any]:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''width_multiplier''' ) )
class MobileViTVaModelTester :
    '''simple docstring'''

    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , hidden_act="swish" , conv_kernel_size=3 , output_stride=32 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=10 , scope=None , width_multiplier=0.25 , ffn_dropout=0.0 , attn_dropout=0.0 , ) -> Optional[Any]:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier , divisor=8 )
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs( self ) -> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config( self ) -> int:
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ) -> Any:
        model = MobileViTVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ) -> List[Any]:
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ) -> Any:
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        result = model(pixel_values , labels=pixel_labels )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def prepare_config_and_inputs_for_common( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A_ : List[str] = False
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[Any] = False
    def setUp( self ) -> Optional[Any]:
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self , config_class=MobileViTVaConfig , has_text_modality=False )
def _UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def _UpperCAmelCase ( self ) -> str:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def _UpperCAmelCase ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def _UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _UpperCAmelCase ( self ) -> Dict:
pass
    def _UpperCAmelCase ( self ) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def _UpperCAmelCase ( self ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def _UpperCAmelCase ( self ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def _UpperCAmelCase ( self ) -> Tuple:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )

    @slow
    def _UpperCAmelCase ( self ) -> Any:
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )


def prepare_img( ):
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor( self ) -> Dict:
        return (
            MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
            if is_vision_available()
            else None
        )
@slow
    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        model = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_logits = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_logits , atol=1e-4 ) )
@slow
    def _UpperCAmelCase ( self ) -> Any:
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def _UpperCAmelCase ( self ) -> Optional[int]:
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape ) | 320 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowerCAmelCase__ ( AbstractDatasetReader ):
    def __init__( self : Tuple , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : str = None , keep_in_memory : bool = False , streaming : bool = False , num_proc : Optional[int] = None , **kwargs : str , ) -> Union[str, Any]:
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , **kwargs , )

    def __A ( self : int ) -> Dict:
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
| 270 | 0 |
def count_divisors( n ) -> Optional[int]:
    """simple docstring"""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution( ) -> Optional[Any]:
    """simple docstring"""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
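# Quick sanity check of the multiplicity trick: 28 = 2**2 * 7, so the divisor
# count is (2 + 1) * (1 + 1) = 6 (namely 1, 2, 4, 7, 14, 28).
#   assert count_divisors(28) == 6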
if __name__ == "__main__":
print(solution())
| 305 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
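# Hedged usage sketch (the checkpoint name is an example and not verified here):
#   from diffusers import UnCLIPPipeline  # assumes the public package layout
#   pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha")
#   image = pipe("a photo of a red panda").images[0]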
| 270 | 0 |
'''simple docstring'''
UpperCAmelCase_ = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
UpperCAmelCase_ = [{"type": "code", "content": INSTALL_CONTENT}]
UpperCAmelCase_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 346 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
SCREAMING_SNAKE_CASE__ : int = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
SCREAMING_SNAKE_CASE__ : Optional[Any] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __A ( self : Dict ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int = CHRF.CHAR_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.WORD_ORDER , SCREAMING_SNAKE_CASE__ : int = CHRF.BETA , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , ) -> Optional[Any]:
__lowerCamelCase = len(references[0] )
if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__lowerCamelCase = [[refs[i] for refs in references] for i in range(SCREAMING_SNAKE_CASE__ )]
__lowerCamelCase = CHRF(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = sb_chrf.corpus_score(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
| 270 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
A : Any = logging.get_logger(__name__)
A : Tuple = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
A : Tuple = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def lowercase_ ( _A : Any , _A : Tuple , _A : List[Any] , _A : List[Any] , _A : List[Any] ):
"""simple docstring"""
for attribute in key.split("." ):
lowerCamelCase__ : List[Any] = getattr(__lowerCAmelCase , __lowerCAmelCase )
if weight_type is not None:
lowerCamelCase__ : str = getattr(__lowerCAmelCase , __lowerCAmelCase ).shape
else:
lowerCamelCase__ : str = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
lowerCamelCase__ : Tuple = value
elif weight_type == "weight_g":
lowerCamelCase__ : List[Any] = value
elif weight_type == "weight_v":
lowerCamelCase__ : int = value
elif weight_type == "bias":
lowerCamelCase__ : Dict = value
else:
lowerCamelCase__ : Any = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def lowercase_ ( _A : Dict , _A : Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : List[Any] = fairseq_model.state_dict()
lowerCamelCase__ : Optional[int] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase__ : int = False
if "conv_layers" in name:
load_conv_layer(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
lowerCamelCase__ : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
lowerCamelCase__ : Any = True
if "*" in mapped_key:
lowerCamelCase__ : Union[str, Any] = name.split(__lowerCAmelCase )[0].split("." )[-2]
lowerCamelCase__ : Tuple = mapped_key.replace("*" , __lowerCAmelCase )
if "weight_g" in name:
lowerCamelCase__ : Any = "weight_g"
elif "weight_v" in name:
lowerCamelCase__ : List[Any] = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
lowerCamelCase__ : Union[str, Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase__ : Dict = "weight"
else:
lowerCamelCase__ : str = None
set_recursively(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
continue
if not is_used:
unused_weights.append(__lowerCAmelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def lowercase_ ( _A : Tuple , _A : Any , _A : Optional[Any] , _A : Optional[int] , _A : List[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] = full_name.split("conv_layers." )[-1]
lowerCamelCase__ : List[Any] = name.split("." )
lowerCamelCase__ : Optional[int] = int(items[0] )
lowerCamelCase__ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
lowerCamelCase__ : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
lowerCamelCase__ : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
lowerCamelCase__ : List[str] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
lowerCamelCase__ : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__lowerCAmelCase )
@torch.no_grad()
def lowercase_ ( _A : Union[str, Any] , _A : Any , _A : List[str]=None ):
"""simple docstring"""
lowerCamelCase__ : Tuple = torch.load(__lowerCAmelCase )
lowerCamelCase__ : List[str] = WavLMConfigOrig(checkpoint["cfg"] )
lowerCamelCase__ : Any = WavLMOrig(__lowerCAmelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
lowerCamelCase__ : int = WavLMConfig.from_pretrained(__lowerCAmelCase )
else:
lowerCamelCase__ : Union[str, Any] = WavLMConfig()
lowerCamelCase__ : Optional[int] = WavLMModel(__lowerCAmelCase )
recursively_load_weights(__lowerCAmelCase , __lowerCAmelCase )
hf_wavlm.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
A : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
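    # Example invocation (added; the script filename is hypothetical, the flags are
    # the ones defined above, and the checkpoint path is a placeholder):
    # python convert_wavlm_checkpoint.py \
    #     --checkpoint_path /path/to/WavLM-Base.pt \
    #     --pytorch_dump_folder_path ./wavlm-base-hf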
| 184 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : int = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure)
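# Added illustration: a minimal sketch of the lazy-import idea used above, written
# with module-level __getattr__ (PEP 562). This is not the actual _LazyModule
# implementation, and the one-entry mapping below is a toy example.
import importlib

_lazy_map = {"ResNetModel": ".modeling_resnet"}

def __getattr__(name):
    # Import the owning submodule on first attribute access, then return the symbol.
    if name in _lazy_map:
        module = importlib.import_module(_lazy_map[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")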
| 270 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 126 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : str = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any]=7 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Tuple=18 , SCREAMING_SNAKE_CASE__ : int=30 , SCREAMING_SNAKE_CASE__ : Any=4_00 , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , ) -> Union[str, Any]:
__lowerCamelCase = size if size is not None else {'''height''': 20, '''width''': 20}
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = num_channels
__lowerCamelCase = image_size
__lowerCamelCase = min_resolution
__lowerCamelCase = max_resolution
__lowerCamelCase = size
__lowerCamelCase = do_normalize
__lowerCamelCase = do_convert_rgb
__lowerCamelCase = [5_12, 10_24, 20_48, 40_96]
__lowerCamelCase = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __A ( self : int ) -> Any:
__lowerCamelCase = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
__lowerCamelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Dict = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Optional[Any] ) -> List[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self )
@property
def __A ( self : Optional[int] ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Tuple ) -> Dict:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : int ) -> str:
__lowerCamelCase = self.image_processor_tester.prepare_dummy_image()
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
__lowerCamelCase = 20_48
__lowerCamelCase = image_processor(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __A ( self : Union[str, Any] ) -> Dict:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Dict ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
__lowerCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
__lowerCamelCase = '''Hello'''
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ , header_text=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : List[str] ) -> Any:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __A ( self : Tuple ) -> List[str]:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __lowercase , unittest.TestCase ):
a__ : Any = PixaStructImageProcessor if is_vision_available() else None
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
__lowerCamelCase = 3
@property
def __A ( self : List[Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self : Union[str, Any] ) -> List[str]:
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) )
def __A ( self : Optional[int] ) -> str:
# Initialize image_processor
__lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image )
# Test not batched input
__lowerCamelCase = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
__lowerCamelCase = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
__lowerCamelCase = image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , max_patches=SCREAMING_SNAKE_CASE__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
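# Added note: the expected hidden dimension asserted throughout these tests is the
# flattened patch (patch_height * patch_width * num_channels) plus 2 slots for the
# row/column position ids that Pix2Struct prepends to every patch.
assert 16 * 16 * 3 + 2 == 770  # default 16x16 patches with 3 channels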
| 270 | 0 |
"""simple docstring"""
import math
class Graph:
    '''simple docstring'''
    def __init__( self, n=0 ): # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n )] for i in range(0, n )
        ] # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n )] for i in range(0, n )
        ] # dp[i][j] stores minimum distance from i to j
    def add_edge( self, u, v, w ):
        self.dp[u][v] = w
    def floyd_warshall( self ):
        for k in range(0, self.n ):
            for i in range(0, self.n ):
                for j in range(0, self.n ):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j] )
    def show_min( self, u, v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
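    # Added check: the bare show_min calls above discard their results; with the
    # edges given, the shortest routes are 1 -> 3 -> 4 (5 + 6 = 11) and
    # 0 -> 2 -> 3 (9 + 7 = 16).
    assert graph.show_min(1, 4) == 11
    assert graph.show_min(0, 3) == 16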
| 238 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCAmelCase__ ( __lowercase ):
def __init__( self : int ) -> Optional[int]:
__lowerCamelCase = []
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
self.events.append('''on_init_end''' )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
self.events.append('''on_train_begin''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : int ) -> Any:
self.events.append('''on_train_end''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
self.events.append('''on_epoch_begin''' )
def __A ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
self.events.append('''on_epoch_end''' )
def __A ( self : str , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
self.events.append('''on_step_begin''' )
def __A ( self : Any , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
self.events.append('''on_step_end''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
self.events.append('''on_evaluate''' )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : str ) -> str:
self.events.append('''on_predict''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Tuple:
self.events.append('''on_save''' )
def __A ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : List[str] ) -> List[str]:
self.events.append('''on_log''' )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
self.events.append('''on_prediction_step''' )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = tempfile.mkdtemp()
def __A ( self : int ) -> List[str]:
shutil.rmtree(self.output_dir )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : List[str]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionModelConfig(a=SCREAMING_SNAKE_CASE__ , b=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE__ , report_to=[] , **SCREAMING_SNAKE_CASE__ )
return Trainer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , callbacks=SCREAMING_SNAKE_CASE__ , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
# Order doesn't matter
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
for cba, cba in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , cba.__class__ )
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
__lowerCamelCase = ['''on_init_end''', '''on_train_begin''']
__lowerCamelCase = 0
__lowerCamelCase = len(trainer.get_eval_dataloader() )
__lowerCamelCase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(SCREAMING_SNAKE_CASE__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# Callbacks passed at init are added to the default callbacks
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowerCamelCase = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> str:
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowerCamelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# We can also add, pop, or remove by instance
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> Any:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# Independent log/save/eval
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# A bit of everything
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE__ ) in warn_mock.call_args[0][0]
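# Added illustration: a minimal user-defined callback of the kind the tests above
# exercise; on_save receives the same (args, state, control) arguments as the
# MyTestTrainerCallback hooks.
class SaveLoggerCallback(TrainerCallback):
    def on_save(self, args, state, control, **kwargs):
        print(f"checkpoint written at global step {state.global_step}")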
| 270 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """simple docstring"""
    def __init__( self ,pos_x : int ,pos_y : int ,goal_x : int ,goal_y : int ,parent : Node | None ):
        '''simple docstring'''
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """simple docstring"""
    def __init__( self ,start : tuple[int, int] ,goal : tuple[int, int] ):
        '''simple docstring'''
        self.start = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,None )
        self.target = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,None )
        self.node_queue = [self.start]
        self.reached = False
    def search( self ):
        '''simple docstring'''
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self ,parent : Node ):
        '''simple docstring'''
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x ,pos_y ,self.target.pos_y ,self.target.pos_x ,parent ) )
        return successors
    def retrace_path( self ,node : Node | None ):
        '''simple docstring'''
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """simple docstring"""
    def __init__( self ,start : tuple[int, int] ,goal : tuple[int, int] ):
        '''simple docstring'''
        self.fwd_bfs = BreadthFirstSearch(start ,goal )
        self.bwd_bfs = BreadthFirstSearch(goal ,start )
        self.reached = False
    def search( self ):
        '''simple docstring'''
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node ,current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path( self ,fwd_node : Node ,bwd_node : Node ):
        '''simple docstring'''
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowerCAmelCase = (0, 0)
__lowerCAmelCase = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCAmelCase = time.time()
__lowerCAmelCase = BreadthFirstSearch(init, goal)
__lowerCAmelCase = bfs.search()
__lowerCAmelCase = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
__lowerCAmelCase = time.time()
__lowerCAmelCase = BidirectionalBreadthFirstSearch(init, goal)
__lowerCAmelCase = bd_bfs.search()
__lowerCAmelCase = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 271 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : int = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : str = self.dummy_uncond_unet
_a : int = PNDMScheduler()
_a : str = PNDMPipeline(unet=_a ,scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
_a : Optional[int] = torch.manual_seed(0 )
_a : Optional[Any] = pndm(generator=_a ,num_inference_steps=20 ,output_type='numpy' ).images
_a : List[str] = torch.manual_seed(0 )
_a : Any = pndm(generator=_a ,num_inference_steps=20 ,output_type='numpy' ,return_dict=_a )[0]
_a : List[Any] = image[0, -3:, -3:, -1]
_a : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : List[Any] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[str] = 'google/ddpm-cifar10-32'
_a : str = UNetaDModel.from_pretrained(_a )
_a : Union[str, Any] = PNDMScheduler()
_a : Tuple = PNDMPipeline(unet=_a ,scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
_a : str = torch.manual_seed(0 )
_a : Optional[Any] = pndm(generator=_a ,output_type='numpy' ).images
_a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : Tuple = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
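# Added illustration mirroring the slow test above (the model id is the one the
# test uses; running this downloads the checkpoint on first use):
unet = UNetaDModel.from_pretrained('google/ddpm-cifar10-32')
pipeline = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
sample = pipeline(num_inference_steps=20, output_type='numpy').images[0]  # (32, 32, 3) array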
| 271 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( VideoMAEImageProcessor ):
    """simple docstring"""
    def __init__( self ,*args ,**kwargs ) -> None:
        '''simple docstring'''
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.' ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 271 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : str ,_a : Path ,_a : Union[str, None] = None ,_a : Union[List[str], None] = None ,_a : Union[str, List[str], None] = None ,_a : bool = True ,):
'''simple docstring'''
_a : Optional[int] = [file for file in os.listdir(_a ) if os.path.isfile(os.path.join(_a ,_a ) )]
if identifier is not None:
_a : List[str] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_a ,_a ):
for n_ in n_identifier:
_a : Tuple = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : List[str] = ignore_files or []
ignore_files.append('__init__.py' )
_a : Tuple = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_a )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : List[str] = getattr(_a ,_a )
_a : int = doctest.DocTestSuite(_a )
_a : Any = unittest.TextTestRunner().run(_a )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : Union[str, Any] = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : int = Path('src/transformers' )
_a : List[Any] = 'modeling'
_a : Optional[Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_a ,identifier=_a ,ignore_files=_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'tokenization'
self.analyze_directory(_a ,identifier=_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Dict = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_a ,identifier=_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = Path('src/transformers' )
_a : List[Any] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_a ,n_identifier=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_a ,ignore_files=_a ,only_modules=_a )
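# Added illustration of the core mechanism used above: doctest.DocTestSuite
# collects a module's doctests and a unittest runner executes them.
import doctest, unittest

def _add(a, b):
    """
    >>> _add(2, 3)
    5
    """
    return a + b

_suite = doctest.DocTestSuite()  # no argument: scan the calling module
_result = unittest.TextTestRunner().run(_suite)
assert len(_result.failures) == 0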
| 271 | 1 |
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = '''vision-encoder-decoder'''
__UpperCAmelCase : str = True
def __init__( self : Optional[int] ,**_a : Any ):
'''simple docstring'''
super().__init__(**_a )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
F"""A configuraton of type {self.model_type} cannot be instantiated because """
F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
_a : Any = kwargs.pop('encoder' )
_a : Tuple = encoder_config.pop('model_type' )
_a : int = kwargs.pop('decoder' )
_a : Dict = decoder_config.pop('model_type' )
_a : Optional[Any] = AutoConfig.for_model(_a ,**_a )
_a : Union[str, Any] = AutoConfig.for_model(_a ,**_a )
_a : Optional[Any] = True
@classmethod
def __lowercase ( cls : Optional[int] ,_a : PretrainedConfig ,_a : PretrainedConfig ,**_a : Any ):
'''simple docstring'''
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
_a : List[str] = True
_a : Union[str, Any] = True
return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = copy.deepcopy(self.__dict__ )
_a : int = self.encoder.to_dict()
_a : int = self.decoder.to_dict()
_a : Any = self.__class__.model_type
return output
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = version.parse('''1.11''' )
@property
def __lowercase ( self : int ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return 1E-4
@property
def __lowercase ( self : int ):
'''simple docstring'''
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : List[str] = OrderedDict()
_a : Optional[int] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
_a : Optional[int] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
_a : Any = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def __lowercase ( self : Optional[int] ,_a : "PreTrainedTokenizerBase" ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
import torch
_a : List[str] = OrderedDict()
_a : Dict = super().generate_dummy_inputs(
_a ,batch_size=_a ,seq_length=_a ,is_pair=_a ,framework=_a )
        batch , encoder_sequence = dummy_input['input_ids'].shape
_a : List[str] = (batch, encoder_sequence, self._config.encoder_hidden_size)
_a : Optional[int] = dummy_input.pop('input_ids' )
_a : Union[str, Any] = dummy_input.pop('attention_mask' )
_a : str = torch.zeros(_a )
return common_inputs
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
@property
def __lowercase ( self : str ):
'''simple docstring'''
pass
def __lowercase ( self : Any ,_a : PretrainedConfig ):
'''simple docstring'''
return VisionEncoderDecoderEncoderOnnxConfig(_a )
def __lowercase ( self : Any ,_a : PretrainedConfig ,_a : PretrainedConfig ,_a : str = "default" ):
'''simple docstring'''
_a : Tuple = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_a ,_a )
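# Added illustration: composing the config above from two sub-configs via the
# public classmethod (the model ids are placeholders; fetches configs when run).
from transformers import AutoConfig, VisionEncoderDecoderConfig

enc_cfg = AutoConfig.from_pretrained('google/vit-base-patch16-224-in21k')
dec_cfg = AutoConfig.from_pretrained('gpt2')
ved_cfg = VisionEncoderDecoderConfig.from_encoder_decoder_configs(enc_cfg, dec_cfg)
assert ved_cfg.decoder.is_decoder and ved_cfg.decoder.add_cross_attention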
| 271 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCAmelCase_ (__a : Optional[Any] , __a : str , __a : Optional[Any]=None ):
"""simple docstring"""
assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
_a : str = nn.Parameter(__a )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
_a : Any = nn.Parameter(__a )
def UpperCAmelCase_ (__a : int , __a : Optional[Any] , __a : int ):
"""simple docstring"""
_a : Tuple = np.asarray(weights[0] )
_a : Union[str, Any] = np.asarray(weights[1] )
_a : Dict = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
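# Reformer's LSH attention shares a single projection for queries and keys (hence the
# combined "query_key" parameter and only three weight arrays above); the local
# attention helper below unpacks four arrays: query, key, value, and the output dense.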
def UpperCAmelCase_ (__a : Optional[Any] , __a : Optional[int] , __a : List[str] ):
"""simple docstring"""
_a : Dict = np.asarray(weights[0] )
_a : Union[str, Any] = np.asarray(weights[1] )
_a : str = np.asarray(weights[2] )
_a : int = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def UpperCAmelCase_ (__a : Any , __a : Any , __a : Optional[Any] ):
"""simple docstring"""
_a : List[str] = weights[0][0][0]
_a : List[Any] = np.asarray(layer_norm_a[0] )
_a : List[str] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# lsh weights + output
_a : List[str] = weights[0][1]
if len(__a ) < 4:
set_layer_weights_in_torch_lsh(__a , torch_block.attention , __a )
else:
set_layer_weights_in_torch_local(__a , torch_block.attention , __a )
    # intermediate weights
_a : Optional[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(__a ) == 4:
_a : Union[str, Any] = intermediate_weights[2]
# layernorm 2
_a : Any = np.asarray(intermediate_weights[0][0] )
_a : List[Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# intermediate dense
_a : Any = np.asarray(intermediate_weights[1][0] )
_a : Any = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
# intermediate out
_a : Optional[int] = np.asarray(intermediate_weights[4][0] )
_a : int = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : Dict , __a : Dict , __a : List[Any] ):
"""simple docstring"""
_a : Optional[int] = torch_model.reformer
# word embeds
_a : Tuple = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__a ) , )
if isinstance(weights[3] , __a ):
_a : Any = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a : List[Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
_a : Any = nn.Parameter(torch.tensor(__a ) )
_a : List[str] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__a ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a : Tuple = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__a , __a , __a )
# output layer norm
_a : Optional[Any] = np.asarray(weights[7][0] )
_a : int = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# output embeddings
_a : List[str] = np.asarray(weights[9][0] )
_a : int = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def UpperCAmelCase_ (__a : Tuple , __a : Optional[Any] , __a : Dict ):
"""simple docstring"""
_a : List[Any] = ReformerConfig.from_json_file(__a )
print(f"""Building PyTorch model from configuration: {config}""" )
_a : int = ReformerModelWithLMHead(__a )
with open(__a , 'rb' ) as f:
_a : Optional[Any] = pickle.load(__a )['weights']
set_model_weights_in_torch(__a , __a , config.hidden_size )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , __a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 271 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_ (__a : List[Any] ):
"""simple docstring"""
if (
        (cp >= 0x4E_00 and cp <= 0x9F_FF)  # CJK Unified Ideographs
        or (cp >= 0x34_00 and cp <= 0x4D_BF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF9_00 and cp <= 0xFA_FF)  # CJK Compatibility Ideographs
        or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F)  # CJK Compatibility Ideographs Supplement
    ):  # i.e. the codepoint falls within the standard CJK ideograph blocks
return True
return False
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
for char in word:
_a : Union[str, Any] = ord(__a )
if not _is_chinese_char(__a ):
return 0
return 1
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : Dict = set()
for token in tokens:
_a : str = len(__a ) > 1 and is_chinese(__a )
if chinese_word:
word_set.add(__a )
_a : Optional[Any] = list(__a )
return word_list
def UpperCAmelCase_ (__a : List[str] , __a : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a : Optional[Any] = max([len(__a ) for w in chinese_word_set] )
_a : Optional[int] = bert_tokens
_a, _a : Any = 0, len(__a )
while start < end:
_a : Tuple = True
if is_chinese(bert_word[start] ):
_a : Union[str, Any] = min(end - start , __a )
for i in range(__a , 1 , -1 ):
_a : Optional[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_a : Any = '##' + bert_word[j]
_a : Union[str, Any] = start + i
_a : int = False
break
if single_word:
start += 1
return bert_word
def UpperCAmelCase_ (__a : List[str] , __a : LTP , __a : BertTokenizer ):
"""simple docstring"""
_a : int = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : Union[str, Any] = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
_a : Optional[Any] = [get_chinese_word(__a ) for r in res]
ltp_res.extend(__a )
assert len(__a ) == len(__a )
_a : str = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : List[str] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=__a , truncation=__a , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(__a ) == len(__a )
_a : List[str] = []
for input_ids, chinese_word in zip(__a , __a ):
_a : int = []
for id in input_ids:
_a : Optional[int] = bert_tokenizer._convert_id_to_token(__a )
input_tokens.append(__a )
_a : List[str] = add_sub_symbol(__a , __a )
_a : Tuple = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(__a ):
if token[:2] == "##":
_a : str = token[2:]
# save chinese tokens' pos
if len(__a ) == 1 and _is_chinese_char(ord(__a ) ):
ref_id.append(__a )
ref_ids.append(__a )
assert len(__a ) == len(__a )
return ref_ids
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_a : Dict = f.readlines()
_a : int = [line.strip() for line in data if len(__a ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_a : int = LTP(args.ltp ) # faster in GPU device
_a : Tuple = BertTokenizer.from_pretrained(args.bert )
_a : int = prepare_ref(__a , __a , __a )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_a : Optional[Any] = [json.dumps(__a ) + '\n' for ref in ref_ids]
f.writelines(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
__lowerCAmelCase = parser.parse_args()
main(args)
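    # Illustrative invocation (script name assumed), matching the defaults above:
    #   python prepare_ref.py --file_name ./resources/chinese-demo.txt \
    #       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt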
| 271 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
        _a : Any = UNet2DModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
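    # A tiny 32x32, two-block UNet keeps this smoke test cheap; the assertions further
    # down compare a 3x3 corner slice of the generated image against reference values.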
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Union[str, Any] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=3 ,)
return model
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Dict = self.dummy_uncond_unet
_a : List[Any] = DDIMScheduler()
_a : List[Any] = self.dummy_vq_model
_a : str = LDMPipeline(unet=_a ,vqvae=_a ,scheduler=_a )
ldm.to(_a )
ldm.set_progress_bar_config(disable=_a )
_a : List[str] = torch.manual_seed(0 )
_a : List[str] = ldm(generator=_a ,num_inference_steps=2 ,output_type='numpy' ).images
_a : List[str] = torch.manual_seed(0 )
_a : Union[str, Any] = ldm(generator=_a ,num_inference_steps=2 ,output_type='numpy' ,return_dict=_a )[0]
_a : Tuple = image[0, -3:, -3:, -1]
_a : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : int = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
_a : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[str] = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(_a )
ldm.set_progress_bar_config(disable=_a )
_a : Optional[int] = torch.manual_seed(0 )
_a : Dict = ldm(generator=_a ,num_inference_steps=5 ,output_type='numpy' ).images
_a : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
_a : int = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 271 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : str = '''sew-d'''
def __init__( self : Any ,_a : Dict=32 ,_a : Optional[Any]=768 ,_a : Dict=12 ,_a : Union[str, Any]=12 ,_a : int=3072 ,_a : Optional[int]=2 ,_a : Optional[Any]=512 ,_a : Optional[int]=256 ,_a : Dict=True ,_a : List[Any]=True ,_a : Any=("p2c", "c2p") ,_a : Optional[Any]="layer_norm" ,_a : Dict="gelu_python" ,_a : Tuple=0.1 ,_a : Union[str, Any]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Dict=0.0 ,_a : int=0.1 ,_a : Any=0.02 ,_a : int=1E-7 ,_a : Any=1E-5 ,_a : int="group" ,_a : int="gelu" ,_a : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,_a : List[str]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,_a : Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,_a : List[Any]=False ,_a : str=128 ,_a : List[str]=16 ,_a : List[Any]=True ,_a : Tuple=0.05 ,_a : Tuple=10 ,_a : List[str]=2 ,_a : Optional[int]=0.0 ,_a : List[Any]=10 ,_a : Union[str, Any]=0 ,_a : Optional[Any]="mean" ,_a : Any=False ,_a : Union[str, Any]=False ,_a : Union[str, Any]=256 ,_a : Optional[Any]=0 ,_a : str=1 ,_a : Optional[int]=2 ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a ,pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a )
_a : Optional[Any] = hidden_size
_a : str = feat_extract_norm
_a : Dict = feat_extract_activation
_a : Tuple = list(_a )
_a : int = list(_a )
_a : Tuple = list(_a )
_a : List[str] = conv_bias
_a : Union[str, Any] = num_conv_pos_embeddings
_a : Dict = num_conv_pos_embedding_groups
_a : Dict = len(self.conv_dim )
_a : List[str] = num_hidden_layers
_a : Union[str, Any] = intermediate_size
_a : Tuple = squeeze_factor
_a : Dict = max_position_embeddings
_a : Any = position_buckets
_a : Optional[int] = share_att_key
_a : str = relative_attention
_a : Dict = norm_rel_ebd
_a : int = list(_a )
_a : Dict = hidden_act
_a : Union[str, Any] = num_attention_heads
_a : str = hidden_dropout
_a : List[str] = attention_dropout
_a : int = activation_dropout
_a : Dict = feat_proj_dropout
_a : Optional[int] = final_dropout
_a : str = layer_norm_eps
_a : List[Any] = feature_layer_norm_eps
_a : int = initializer_range
_a : Union[str, Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_a : List[str] = apply_spec_augment
_a : Optional[int] = mask_time_prob
_a : str = mask_time_length
_a : Dict = mask_time_min_masks
_a : int = mask_feature_prob
_a : Tuple = mask_feature_length
_a : Optional[Any] = mask_feature_min_masks
# ctc loss
_a : Optional[int] = ctc_loss_reduction
_a : List[Any] = ctc_zero_infinity
# sequence classification
_a : Any = use_weighted_layer_sum
_a : List[str] = classifier_proj_size
@property
def __lowercase ( self : Any ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
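    # The property above returns the product of all convolutional strides, i.e. the
    # total downsampling factor from raw audio samples to feature-extractor frames.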
| 271 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : int ,*_a : Optional[int] ,**_a : str ):
'''simple docstring'''
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
| 271 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[str] = '''donut-swin'''
__UpperCAmelCase : Optional[Any] = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Dict ,_a : Optional[Any]=224 ,_a : List[str]=4 ,_a : Union[str, Any]=3 ,_a : Any=96 ,_a : str=[2, 2, 6, 2] ,_a : str=[3, 6, 12, 24] ,_a : Union[str, Any]=7 ,_a : int=4.0 ,_a : Optional[Any]=True ,_a : Optional[Any]=0.0 ,_a : List[Any]=0.0 ,_a : Dict=0.1 ,_a : List[Any]="gelu" ,_a : str=False ,_a : List[Any]=0.02 ,_a : Dict=1E-5 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(**_a )
_a : Any = image_size
_a : Any = patch_size
_a : str = num_channels
_a : int = embed_dim
_a : Union[str, Any] = depths
_a : Union[str, Any] = len(_a )
_a : Optional[int] = num_heads
_a : Optional[int] = window_size
_a : List[str] = mlp_ratio
_a : str = qkv_bias
_a : int = hidden_dropout_prob
_a : Dict = attention_probs_dropout_prob
_a : str = drop_path_rate
_a : List[str] = hidden_act
_a : List[str] = use_absolute_embeddings
_a : int = layer_norm_eps
_a : List[Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_a : Optional[int] = int(embed_dim * 2 ** (len(_a ) - 1) )
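        # e.g. with the defaults embed_dim=96 and four stages: 96 * 2**3 = 768.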
| 271 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : Optional[Any]=None ,_a : Dict=None ,*_a : int ,**_a : str ):
'''simple docstring'''
super().__init__(*_a ,**_a )
if config is None:
assert isinstance(self.model ,_a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_a : List[Any] = self.model.config
else:
_a : Optional[int] = config
_a : List[str] = data_args
_a : List[Any] = self.config.tgt_vocab_size if isinstance(self.config ,_a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ' padding.' )
if self.args.label_smoothing == 0:
_a : List[str] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_a : Tuple = label_smoothed_nll_loss
def __lowercase ( self : List[str] ,_a : int ):
'''simple docstring'''
if self.optimizer is None:
_a : Union[str, Any] = ['bias', 'LayerNorm.weight']
_a : Tuple = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
_a : Optional[int] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_a : Any = Adafactor
_a : Dict = {'scale_parameter': False, 'relative_step': False}
else:
_a : Union[str, Any] = AdamW
_a : str = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
'eps': self.args.adam_epsilon,
}
_a : Union[str, Any] = self.args.learning_rate
if self.sharded_ddp:
_a : str = OSS(
params=_a ,optim=_a ,**_a ,)
else:
_a : Tuple = optimizer_cls(_a ,**_a )
if self.lr_scheduler is None:
_a : List[Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def __lowercase ( self : List[Any] ,_a : List[Any] ):
'''simple docstring'''
_a : str = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_a : int = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_a : List[str] = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
_a : Optional[int] = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=_a )
return scheduler
def __lowercase ( self : Tuple ):
'''simple docstring'''
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __lowercase ( self : Dict ,_a : Dict ,_a : Any ,_a : Dict ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_a : List[Any] = model(**_a ,use_cache=_a )[0]
_a : Union[str, Any] = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
_a, _a : Union[str, Any] = model(**_a ,labels=_a ,use_cache=_a )[:2]
else:
# compute label smoothed loss
_a : List[Any] = model(**_a ,use_cache=_a )[0]
_a : Any = torch.nn.functional.log_softmax(_a ,dim=-1 )
_a, _a : List[str] = self.loss_fn(_a ,_a ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def __lowercase ( self : Optional[int] ,_a : Union[str, Any] ,_a : List[Any] ):
'''simple docstring'''
_a : Optional[int] = inputs.pop('labels' )
_a, _a : int = self._compute_loss(_a ,_a ,_a )
return loss
def __lowercase ( self : Optional[Any] ,_a : nn.Module ,_a : Dict[str, Union[torch.Tensor, Any]] ,_a : bool ,_a : Optional[List[str]] = None ,):
'''simple docstring'''
_a : int = self._prepare_inputs(_a )
_a : Any = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_a : int = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**_a ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_a : int = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
_a : Union[str, Any] = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
_a, _a : Optional[int] = self._compute_loss(_a ,_a ,_a )
_a : Optional[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_a : Optional[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_a : Dict = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def __lowercase ( self : str ,_a : Tuple ,_a : Tuple ):
'''simple docstring'''
_a : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F""" padded to `max_length`={max_length}""" )
_a : int = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
_a : Union[str, Any] = tensor
return padded_tensor
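    # Example: a (2, 7) tensor padded to max_length=10 comes back as (2, 10), with the
    # three new positions per row filled by pad_token_id so shapes align for metrics.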
| 271 | 1 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
def UpperCAmelCase_ (__a : nn.ModuleList , __a : nn.ModuleList , __a : List[int] ):
"""simple docstring"""
_a : Optional[int] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__a ) == len(__a ), f"""{len(__a )} != {len(__a )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__lowerCAmelCase = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__lowerCAmelCase = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
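# As the tables encode it: when copying layers we keep the teacher's first and last
# layers and spread the rest roughly evenly, while the supervision map leans toward
# the teacher's later layers instead.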
def UpperCAmelCase_ (__a : List[Any] , __a : Optional[Any] ):
"""simple docstring"""
try:
_a : Tuple = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
f""" {n_student}""" )
return list(range(__a ) )
def UpperCAmelCase_ (__a : Any , __a : Any ):
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(__a ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def UpperCAmelCase_ (__a : Union[str, PreTrainedModel] , __a : Union[str, Path] = "student" , __a : Union[int, None] = None , __a : Union[int, None] = None , __a : str=False , __a : Union[str, Any]=None , __a : int=None , **__a : Optional[Any] , ):
"""simple docstring"""
_a : str = 'encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'
assert (e is not None) or (d is not None), _msg
if isinstance(__a , __a ):
AutoTokenizer.from_pretrained(__a ).save_pretrained(__a ) # purely for convenience
        _a : str = AutoModelForSeq2SeqLM.from_pretrained(__a ).eval()
else:
assert isinstance(__a , __a ), f"""teacher must be a model or string got type {type(__a )}"""
_a : Any = teacher.config.to_diff_dict()
try:
_a, _a : Optional[Any] = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_a : str = teacher_e
if d is None:
_a : Union[str, Any] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
_a, _a : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_a, _a : Any = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_a : int = teacher_e
if d is None:
_a : Tuple = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__a )
# Copy weights
_a : Tuple = teacher.config_class(**__a )
    _a : Tuple = AutoModelForSeq2SeqLM.from_config(__a )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_a : Optional[Any] = student.load_state_dict(teacher.state_dict() , strict=__a )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_a, _a : List[str] = list(range(__a ) ), list(range(__a ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(__a )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_a : List[int] = pick_layers_to_copy(__a , __a )
if d_layers_to_copy is None:
_a : List[int] = pick_layers_to_copy(__a , __a )
try:
if hasattr(
__a , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __a )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __a )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __a )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __a )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __a )
copy_layers(teacher.decoder.block , student.decoder.block , __a )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
_a : Dict = {
'teacher_type': teacher.config.model_type,
'copied_encoder_layers': e_layers_to_copy,
'copied_decoder_layers': d_layers_to_copy,
}
student.save_pretrained(__a )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 271 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCAmelCase = re.compile(r"""\s+""")
def UpperCAmelCase_ (__a : Any ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__a , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[str] = [len(__a ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__a ), "line_max": max(__a )}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Union[str, Any] = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def UpperCAmelCase_ (__a : Optional[int] , __a : Any ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def UpperCAmelCase_ (__a : int , __a : Union[str, Any]=5 ):
"""simple docstring"""
_a : Optional[int] = ['auto-generated', 'autogenerated', 'automatically generated']
_a : List[str] = example['content'].splitlines()
for _, line in zip(range(__a ) , __a ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCAmelCase_ (__a : List[str] , __a : Dict=5 , __a : Tuple=0.05 ):
"""simple docstring"""
_a : Optional[int] = ['unit tests', 'test file', 'configuration file']
_a : int = example['content'].splitlines()
_a : int = 0
_a : Dict = 0
# first test
for _, line in zip(range(__a ) , __a ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_a : int = example['content'].count('\n' )
_a : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCAmelCase_ (__a : Optional[int] ):
"""simple docstring"""
_a : List[str] = ['def ', 'class ', 'for ', 'while ']
_a : str = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCAmelCase_ (__a : int , __a : Any=4 ):
"""simple docstring"""
_a : List[str] = example['content'].splitlines()
_a : Dict = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Optional[Any] = tokenizer(example['content'] , truncation=__a )['input_ids']
_a : Optional[int] = len(example['content'] ) / len(__a )
return {"ratio": ratio}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = {}
results.update(get_hash(__a ) )
results.update(line_stats(__a ) )
results.update(alpha_stats(__a ) )
results.update(char_token_ratio(__a ) )
results.update(is_autogenerated(__a ) )
results.update(is_config_or_test(__a ) )
results.update(has_no_keywords(__a ) )
results.update(has_few_assignments(__a ) )
return results
def UpperCAmelCase_ (__a : Any , __a : Any , __a : str ):
"""simple docstring"""
if not check_uniques(__a , __a ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCAmelCase_ (__a : Union[str, Any] ):
"""simple docstring"""
with open(__a , 'rb' ) as f_in:
with gzip.open(str(__a ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__a , __a )
os.unlink(__a )
# Settings
__lowerCAmelCase = HfArgumentParser(PreprocessingArguments)
__lowerCAmelCase = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase = multiprocessing.cpu_count()
__lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCAmelCase = time.time()
__lowerCAmelCase = load_dataset(args.dataset_name, split="""train""")
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCAmelCase = time.time()
__lowerCAmelCase = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCAmelCase = set(ds.unique("""hash"""))
__lowerCAmelCase = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCAmelCase = time.time()
__lowerCAmelCase = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCAmelCase = time.time()
__lowerCAmelCase , __lowerCAmelCase = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCAmelCase = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
__lowerCAmelCase = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
__lowerCAmelCase = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCAmelCase = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCAmelCase = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 271 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = PegasusTokenizer
__UpperCAmelCase : int = PegasusTokenizerFast
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Optional[Any] = True
def __lowercase ( self : Any ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_a : Optional[int] = PegasusTokenizer(_a )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def __lowercase ( self : str ,**_a : str ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Union[str, Any] ,_a : Dict ):
'''simple docstring'''
return ("This is a test", "This is a test")
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Any = '</s>'
_a : Optional[int] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) ,_a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) ,_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<pad>' )
self.assertEqual(vocab_keys[1] ,'</s>' )
self.assertEqual(vocab_keys[-1] ,'v' )
self.assertEqual(len(_a ) ,1103 )
def __lowercase ( self : Dict ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1103 )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_a : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
_a : Union[str, Any] = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
_a : Any = rust_tokenizer([raw_input_str] ,return_tensors=_a ,add_special_tokens=_a ).input_ids[0]
_a : List[str] = py_tokenizer([raw_input_str] ,return_tensors=_a ,add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a ,_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_a : List[Any] = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
_a : List[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_a : Union[str, Any] = tokenizer([raw_input_str] ,return_tensors=_a ).input_ids[0]
self.assertListEqual(_a ,_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_a : Any = 'To ensure a smooth flow of bank resolutions.'
_a : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_a : Union[str, Any] = tokenizer([raw_input_str] ,return_tensors=_a ).input_ids[0]
self.assertListEqual(_a ,_a )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
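        # Pegasus shifts raw SentencePiece ids upward by `offset` (103 here) to reserve
        # the low id range for special tokens such as the mask tokens asserted above.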
@require_torch
def __lowercase ( self : str ):
'''simple docstring'''
_a : Tuple = ['This is going to be way too long.' * 150, 'short example']
_a : List[str] = ['not super long but more than 5 tokens', 'tiny']
_a : str = self._large_tokenizer(_a ,padding=_a ,truncation=_a ,return_tensors='pt' )
_a : int = self._large_tokenizer(
text_target=_a ,max_length=5 ,padding=_a ,truncation=_a ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,)
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : List[str] = PegasusTokenizer
__UpperCAmelCase : Optional[Any] = PegasusTokenizerFast
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Any = True
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_a : Tuple = PegasusTokenizer(_a ,offset=0 ,mask_token_sent=_a ,mask_token='[MASK]' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def __lowercase ( self : List[Any] ,**_a : Optional[Any] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
return ("This is a test", "This is a test")
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_a : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_a : Optional[int] = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
_a : int = rust_tokenizer([raw_input_str] ,return_tensors=_a ,add_special_tokens=_a ).input_ids[0]
_a : List[Any] = py_tokenizer([raw_input_str] ,return_tensors=_a ,add_special_tokens=_a ).input_ids[0]
self.assertListEqual(_a ,_a )
@require_torch
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Tuple = ['This is going to be way too long.' * 1000, 'short example']
_a : Optional[Any] = ['not super long but more than 5 tokens', 'tiny']
_a : int = self._large_tokenizer(_a ,padding=_a ,truncation=_a ,return_tensors='pt' )
_a : str = self._large_tokenizer(
text_target=_a ,max_length=5 ,padding=_a ,truncation=_a ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(_a ) == 2 # input_ids, attention_mask.
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[int] = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
_a : Optional[Any] = self._large_tokenizer(_a ).input_ids
self.assertListEqual(
_a ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,)
| 271 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase = 1_6
__lowerCAmelCase = 3_2
def UpperCAmelCase_ (__a : Accelerator , __a : DatasetDict , __a : List[int] , __a : List[int] , __a : int = 1_6 ):
"""simple docstring"""
_a : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
_a : str = DatasetDict(
{
'train': dataset['train'].select(__a ),
'validation': dataset['train'].select(__a ),
'test': dataset['validation'],
} )
def tokenize_function(__a : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
_a : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a : List[str] = datasets.map(
__a , batched=__a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__a : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want lengths rounded to multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a : Tuple = 1_6
elif accelerator.mixed_precision != "no":
_a : List[Any] = 8
else:
_a : List[Any] = None
return tokenizer.pad(
__a , padding='longest' , max_length=__a , pad_to_multiple_of=__a , return_tensors='pt' , )
# Instantiate dataloaders.
_a : Any = DataLoader(
tokenized_datasets['train'] , shuffle=__a , collate_fn=__a , batch_size=__a )
_a : Optional[int] = DataLoader(
tokenized_datasets['validation'] , shuffle=__a , collate_fn=__a , batch_size=__a )
_a : Optional[Any] = DataLoader(
tokenized_datasets['test'] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader, test_dataloader
def UpperCAmelCase_ (__a : Any , __a : Union[str, Any] ):
"""simple docstring"""
_a : Dict = []
# Download the dataset
_a : Tuple = load_dataset('glue' , 'mrpc' )
# Create our splits
_a : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_a : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Optional[Any] = config['lr']
_a : Optional[int] = int(config['num_epochs'] )
_a : Dict = int(config['seed'] )
_a : Dict = int(config['batch_size'] )
_a : Optional[int] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_a : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Any = batch_size // MAX_GPU_BATCH_SIZE
_a : List[str] = MAX_GPU_BATCH_SIZE
set_seed(__a )
# New Code #
# Create our folds:
_a : int = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
_a : Any = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__a ):
_a, _a, _a : Optional[Any] = get_fold_dataloaders(
__a , __a , __a , __a , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : Dict = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_a : List[str] = AdamW(params=model.parameters() , lr=__a )
# Instantiate scheduler
_a : List[Any] = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=1_0_0 , num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a, _a, _a, _a, _a : Union[str, Any] = accelerator.prepare(
__a , __a , __a , __a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : Dict = model(**__a )
_a : int = outputs.loss
_a : Any = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Union[str, Any] = model(**__a )
_a : Tuple = outputs.logits.argmax(dim=-1 )
_a, _a : Any = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__a , references=__a , )
_a : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __a )
# New Code #
# We also run predictions on the test set at the very end
_a : Any = []
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Tuple = model(**__a )
_a : Dict = outputs.logits
_a, _a : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__a , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_a : Dict = torch.cat(__a , dim=0 )
_a : Any = torch.stack(__a , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
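    # The stack/sum/div/argmax above is a soft-voting ensemble: per-fold logits are
    # averaged before the argmax rather than majority-voting per-fold hard labels.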
_a : str = metric.compute(predictions=__a , references=__a )
accelerator.print('Average test metrics from all folds:' , __a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=__a , default=__a , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=__a , default=3 , help='The number of splits to perform across the dataset' )
_a : Any = parser.parse_args()
_a : int = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(__a , __a )
if __name__ == "__main__":
main()
| 271 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
__lowerCAmelCase = {"""tokenization_herbert""": ["""HerbertTokenizer"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""HerbertTokenizerFast"""]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
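# Editor's note: `_LazyModule` defers the submodule imports above until first
# attribute access, so importing the package stays cheap and the fast tokenizer
# path only matters once `HerbertTokenizerFast` is actually requested.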
| 271 |
'''simple docstring'''
from __future__ import annotations
__lowerCAmelCase = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
__lowerCAmelCase = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Optional[int] = []
_a : int = len(__a )
for i in range(__a ):
_a : float = -1
for j in range(i + 1 , __a ):
if arr[i] < arr[j]:
_a : Any = arr[j]
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Tuple = []
for i, outer in enumerate(__a ):
_a : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_a : Dict = inner
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : int = len(__a )
_a : list[float] = []
_a : list[float] = [-1] * arr_size
for index in reversed(range(__a ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_a : Dict = stack[-1]
stack.append(arr[index] )
return result
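# Editor's note, a worked example for the stack-based O(n) pass above: scanning
# [2, 1, 5] right to left keeps a decreasing stack of candidates and yields
# [5, 5, -1]; each element is pushed and popped at most once, unlike the O(n^2)
# double loops in the two slower variants.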
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCAmelCase = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 271 | 1 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple ,_a : NestedDataStructureLike[PathLike] ,_a : Optional[NamedSplit] = None ,_a : Optional[Features] = None ,_a : str = None ,_a : bool = False ,_a : bool = False ,_a : Optional[str] = None ,_a : Optional[int] = None ,**_a : Union[str, Any] ,):
'''simple docstring'''
super().__init__(
_a ,split=_a ,features=_a ,cache_dir=_a ,keep_in_memory=_a ,streaming=_a ,num_proc=_a ,**_a ,)
_a : List[Any] = field
_a : List[Any] = path_or_paths if isinstance(_a ,_a ) else {self.split: path_or_paths}
_a : int = Json(
cache_dir=_a ,data_files=_a ,features=_a ,field=_a ,**_a ,)
def __lowercase ( self : str ):
'''simple docstring'''
if self.streaming:
_a : Tuple = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_a : Any = None
_a : Optional[Any] = None
_a : Dict = None
_a : Dict = None
self.builder.download_and_prepare(
download_config=_a ,download_mode=_a ,verification_mode=_a ,base_path=_a ,num_proc=self.num_proc ,)
_a : List[Any] = self.builder.as_dataset(
split=self.split ,verification_mode=_a ,in_memory=self.keep_in_memory )
return dataset
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Any ,_a : Dataset ,_a : Union[PathLike, BinaryIO] ,_a : Optional[int] = None ,_a : Optional[int] = None ,**_a : Any ,):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_a : Union[str, Any] = dataset
_a : Optional[Any] = path_or_buf
_a : Optional[int] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_a : Any = num_proc
_a : Optional[Any] = 'utf-8'
_a : Any = to_json_kwargs
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[str] = self.to_json_kwargs.pop('path_or_buf' ,_a )
_a : List[str] = self.to_json_kwargs.pop('orient' ,'records' )
_a : Dict = self.to_json_kwargs.pop('lines' ,True if orient == 'records' else False )
_a : Optional[Any] = self.to_json_kwargs.pop('index' ,False if orient in ['split', 'table'] else True )
_a : Union[str, Any] = self.to_json_kwargs.pop('compression' ,_a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,'wb' ,compression=_a ) as buffer:
_a : str = self._write(file_obj=_a ,orient=_a ,lines=_a ,index=_a ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
' was passed. Please provide a local path instead.' )
_a : List[Any] = self._write(
file_obj=self.path_or_buf ,orient=_a ,lines=_a ,index=_a ,**self.to_json_kwargs )
return written
def __lowercase ( self : Dict ,_a : Optional[Any] ):
'''simple docstring'''
_a, _a, _a, _a, _a : Optional[Any] = args
_a : Any = query_table(
table=self.dataset.data ,key=slice(_a ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
_a : List[Any] = batch.to_pandas().to_json(
path_or_buf=_a ,orient=_a ,lines=_a ,index=_a ,**_a )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def __lowercase ( self : Any ,_a : BinaryIO ,_a : Optional[int] ,_a : Any ,_a : Union[str, Any] ,**_a : str ,):
'''simple docstring'''
_a : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating json from Arrow format' ,):
_a : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_a )
else:
_a, _a : Optional[int] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,_a ,_a )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating json from Arrow format' ,):
written += file_obj.write(_a )
return written
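# Editor's note: the writer above streams the Arrow table in `batch_size` slices,
# converts each slice to JSON lines through pandas, and appends the encoded
# bytes; with `num_proc > 1` the slices are serialized in parallel by a
# multiprocessing.Pool while the parent process writes them in submission order.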
| 271 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__lowerCAmelCase = HUGGINGFACE_HUB_CACHE
__lowerCAmelCase = """config.json"""
__lowerCAmelCase = """diffusion_pytorch_model.bin"""
__lowerCAmelCase = """diffusion_flax_model.msgpack"""
__lowerCAmelCase = """model.onnx"""
__lowerCAmelCase = """diffusion_pytorch_model.safetensors"""
__lowerCAmelCase = """weights.pb"""
__lowerCAmelCase = """https://huggingface.co"""
__lowerCAmelCase = default_cache_path
__lowerCAmelCase = """diffusers_modules"""
__lowerCAmelCase = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
__lowerCAmelCase = ["""fp16""", """non-ema"""]
__lowerCAmelCase = """.self_attn"""
| 271 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__lowerCAmelCase = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : str
__UpperCAmelCase : List[str]
__UpperCAmelCase : Optional[List[str]]
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : List[int]
__UpperCAmelCase : List[int]
__UpperCAmelCase : Optional[List[int]] = None
__UpperCAmelCase : Optional[List[int]] = None
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''train'''
__UpperCAmelCase : Optional[int] = '''dev'''
__UpperCAmelCase : Optional[Any] = '''test'''
class UpperCAmelCase__ :
"""simple docstring"""
@staticmethod
def __lowercase ( _a : int ,_a : Union[Split, str] ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __lowercase ( _a : str ):
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __lowercase ( _a : List[InputExample] ,_a : List[str] ,_a : int ,_a : PreTrainedTokenizer ,_a : List[str]=False ,_a : Tuple="[CLS]" ,_a : Optional[int]=1 ,_a : List[Any]="[SEP]" ,_a : List[str]=False ,_a : Optional[int]=False ,_a : Optional[Any]=0 ,_a : List[str]=0 ,_a : Tuple=-100 ,_a : Optional[int]=0 ,_a : int=True ,):
'''simple docstring'''
_a : Optional[Any] = {label: i for i, label in enumerate(_a )}
_a : str = []
for ex_index, example in enumerate(_a ):
            if ex_index % 10_000 == 0:
logger.info('Writing example %d of %d' ,_a ,len(_a ) )
_a : List[Any] = []
_a : List[str] = []
for word, label in zip(example.words ,example.labels ):
_a : Dict = tokenizer.tokenize(_a )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(_a ) > 0:
tokens.extend(_a )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_a ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_a : Dict = tokenizer.num_special_tokens_to_add()
if len(_a ) > max_seq_length - special_tokens_count:
_a : int = tokens[: (max_seq_length - special_tokens_count)]
_a : Optional[Any] = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_a : Optional[Any] = [sequence_a_segment_id] * len(_a )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_a : str = [cls_token] + tokens
_a : List[Any] = [pad_token_label_id] + label_ids
_a : Optional[int] = [cls_token_segment_id] + segment_ids
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_a : Optional[int] = [1 if mask_padding_with_zero else 0] * len(_a )
# Zero-pad up to the sequence length.
_a : Tuple = max_seq_length - len(_a )
if pad_on_left:
_a : Dict = ([pad_token] * padding_length) + input_ids
_a : List[str] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_a : int = ([pad_token_segment_id] * padding_length) + segment_ids
_a : List[Any] = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_a ) == max_seq_length
assert len(_a ) == max_seq_length
assert len(_a ) == max_seq_length
assert len(_a ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' ,example.guid )
logger.info('tokens: %s' ,' '.join([str(_a ) for x in tokens] ) )
logger.info('input_ids: %s' ,' '.join([str(_a ) for x in input_ids] ) )
logger.info('input_mask: %s' ,' '.join([str(_a ) for x in input_mask] ) )
logger.info('segment_ids: %s' ,' '.join([str(_a ) for x in segment_ids] ) )
logger.info('label_ids: %s' ,' '.join([str(_a ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_a : Tuple = None
features.append(
InputFeatures(
input_ids=_a ,attention_mask=_a ,token_type_ids=_a ,label_ids=_a ) )
return features
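# Editor's note: the converter above aligns one label per word by tagging only
# the first wordpiece of each word with the real label id and every remaining
# wordpiece with `pad_token_label_id` (-100), which CrossEntropyLoss ignores.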
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[InputFeatures]
__UpperCAmelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int ,_a : TokenClassificationTask ,_a : str ,_a : PreTrainedTokenizer ,_a : List[str] ,_a : str ,_a : Optional[int] = None ,_a : Any=False ,_a : Split = Split.train ,):
'''simple docstring'''
_a : Optional[int] = os.path.join(
_a ,'cached_{}_{}_{}'.format(mode.value ,tokenizer.__class__.__name__ ,str(_a ) ) ,)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a : Dict = cached_features_file + '.lock'
with FileLock(_a ):
if os.path.exists(_a ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
_a : Optional[Any] = torch.load(_a )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
_a : Tuple = token_classification_task.read_examples_from_file(_a ,_a )
# TODO clean up all this to leverage built-in features of tokenizers
_a : Dict = token_classification_task.convert_examples_to_features(
_a ,_a ,_a ,_a ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=_a ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
logger.info(F"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features ,_a )
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Tuple ,_a : Any ):
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : List[InputFeatures]
__UpperCAmelCase : int = -100
def __init__( self : Tuple ,_a : TokenClassificationTask ,_a : str ,_a : PreTrainedTokenizer ,_a : List[str] ,_a : str ,_a : Optional[int] = None ,_a : Union[str, Any]=False ,_a : Split = Split.train ,):
'''simple docstring'''
_a : Optional[int] = token_classification_task.read_examples_from_file(_a ,_a )
# TODO clean up all this to leverage built-in features of tokenizers
_a : str = token_classification_task.convert_examples_to_features(
_a ,_a ,_a ,_a ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=_a ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_a : Union[str, Any] = tf.data.Dataset.from_generator(
_a ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) ,(
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) ,)
else:
_a : int = tf.data.Dataset.from_generator(
_a ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) ,(
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) ,)
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.features )
def __getitem__( self : Any ,_a : Any ):
'''simple docstring'''
return self.features[i]
| 271 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : Any ,_a : Optional[int]=2 ,_a : Optional[Any]=True ,_a : Dict=False ,_a : Dict=10 ,_a : Any=3 ,_a : str=32 * 8 ,_a : Optional[int]=32 * 8 ,_a : int=4 ,_a : str=64 ,):
'''simple docstring'''
_a : Dict = parent
_a : Union[str, Any] = batch_size
_a : Tuple = is_training
_a : List[str] = use_auxiliary_loss
_a : Optional[Any] = num_queries
_a : str = num_channels
_a : List[str] = min_size
_a : int = max_size
_a : Optional[int] = num_labels
_a : List[str] = hidden_dim
_a : int = hidden_dim
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
_a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a )
_a : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5
).float()
_a : Tuple = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long()
_a : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
_a : str = self.num_queries
_a : Union[str, Any] = self.num_labels
_a : Tuple = [1, 1, 1, 1]
_a : Dict = self.num_channels
_a : str = 64
_a : Tuple = 128
_a : Optional[Any] = self.hidden_dim
_a : Union[str, Any] = self.hidden_dim
_a : List[Any] = self.hidden_dim
return config
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a, _a, _a, _a, _a : Optional[Any] = self.prepare_config_and_inputs()
_a : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : str ):
'''simple docstring'''
_a : str = output.encoder_hidden_states
_a : Any = output.pixel_decoder_hidden_states
_a : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(_a ) ,len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_a ) ,len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(_a ) ,config.decoder_layers )
def __lowercase ( self : List[str] ,_a : str ,_a : List[Any] ,_a : Any ,_a : Union[str, Any]=False ):
'''simple docstring'''
with torch.no_grad():
_a : str = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[Any] = model(_a ,output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a ,_a )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Union[str, Any] ,_a : Tuple ,_a : List[str] ,_a : Any ):
'''simple docstring'''
_a : int = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[int] = model(_a )
comm_check_on_output(_a )
_a : List[str] = model(
pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Union[str, Any] = MaskaFormerModelTester(self )
_a : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(_a )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] = [*signature.parameters.keys()]
_a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_a : Dict = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : int = (self.model_tester.min_size,) * 2
_a : Any = {
'pixel_values': torch.randn((2, 3, *size) ,device=_a ),
'mask_labels': torch.randn((2, 10, *size) ,device=_a ),
'class_labels': torch.zeros(2 ,10 ,device=_a ).long(),
}
_a : List[Any] = self.model_tester.get_config()
_a : int = MaskaFormerForUniversalSegmentation(_a ).to(_a )
_a : str = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a ).to(_a )
_a : Optional[int] = model(**_a ,output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_a : List[str] = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[str] = self.model_tester.prepare_config_and_inputs()
_a : Any = model_class(_a )
model.to(_a )
model.train()
_a : Union[str, Any] = model(_a ,mask_labels=_a ,class_labels=_a ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : str = True
_a : str = True
_a : List[str] = model_class(_a ).to(_a )
model.train()
_a : Optional[int] = model(_a ,mask_labels=_a ,class_labels=_a )
_a : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_a : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
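# Editor's note on the test above: `retain_grad()` keeps gradients on non-leaf
# activations, so after `backward()` the test can assert that backprop reaches
# the encoder, pixel decoder, transformer decoder, and attention tensors.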
__lowerCAmelCase = 1e-4
def UpperCAmelCase_ ():
"""simple docstring"""
_a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowercase ( self : Any ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
_a : int = self.default_image_processor
_a : Tuple = prepare_img()
_a : Any = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[Any] = model(**_a )
_a : List[Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : str = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Any = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Optional[Any] = self.default_image_processor
_a : List[Any] = prepare_img()
_a : str = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Any = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[int] = model(**_a )
# masks_queries_logits
_a : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_a : Dict = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_a : Optional[Any] = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
_a : str = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Tuple = self.default_image_processor
_a : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
_a : str = inputs['pixel_values'].to(_a )
_a : str = [el.to(_a ) for el in inputs['mask_labels']]
_a : Dict = [el.to(_a ) for el in inputs['class_labels']]
with torch.no_grad():
_a : List[str] = model(**_a )
self.assertTrue(outputs.loss is not None )
| 271 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
    return num_a ^ num_b < 0
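# Editor's note: the XOR of two ints is negative exactly when their sign bits
# differ, e.g. 1 ^ -1 == -2 (opposite signs, True) while 1 ^ 2 == 3 (False).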
if __name__ == "__main__":
import doctest
doctest.testmod()
| 271 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_ (__a : List[Any] ):
"""simple docstring"""
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
for char in word:
_a : Union[str, Any] = ord(__a )
if not _is_chinese_char(__a ):
return 0
return 1
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : Dict = set()
for token in tokens:
_a : str = len(__a ) > 1 and is_chinese(__a )
if chinese_word:
word_set.add(__a )
_a : Optional[Any] = list(__a )
return word_list
def UpperCAmelCase_ (__a : List[str] , __a : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a : Optional[Any] = max([len(__a ) for w in chinese_word_set] )
_a : Optional[int] = bert_tokens
_a, _a : Any = 0, len(__a )
while start < end:
_a : Tuple = True
if is_chinese(bert_word[start] ):
_a : Union[str, Any] = min(end - start , __a )
for i in range(__a , 1 , -1 ):
_a : Optional[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_a : Any = '##' + bert_word[j]
_a : Union[str, Any] = start + i
_a : int = False
break
if single_word:
start += 1
return bert_word
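# Editor's note, a worked example for the longest-match loop above: with BERT
# tokens ['你', '好', '吗'] and the LTP word set {'你好'}, the longest match at
# position 0 is '你好', so the second character becomes a subword: ['你', '##好', '吗'].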
def UpperCAmelCase_ (__a : List[str] , __a : LTP , __a : BertTokenizer ):
"""simple docstring"""
_a : int = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : Union[str, Any] = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
_a : Optional[Any] = [get_chinese_word(__a ) for r in res]
ltp_res.extend(__a )
assert len(__a ) == len(__a )
_a : str = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : List[str] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=__a , truncation=__a , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(__a ) == len(__a )
_a : List[str] = []
for input_ids, chinese_word in zip(__a , __a ):
_a : int = []
for id in input_ids:
_a : Optional[int] = bert_tokenizer._convert_id_to_token(__a )
input_tokens.append(__a )
_a : List[str] = add_sub_symbol(__a , __a )
_a : Tuple = []
        # We only save positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__a ):
if token[:2] == "##":
_a : str = token[2:]
# save chinese tokens' pos
if len(__a ) == 1 and _is_chinese_char(ord(__a ) ):
ref_id.append(__a )
ref_ids.append(__a )
assert len(__a ) == len(__a )
return ref_ids
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_a : Dict = f.readlines()
_a : int = [line.strip() for line in data if len(__a ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _a : int = LTP(args.ltp )  # segmentation is faster on a GPU device
_a : Tuple = BertTokenizer.from_pretrained(args.bert )
_a : int = prepare_ref(__a , __a , __a )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_a : Optional[Any] = [json.dumps(__a ) + '\n' for ref in ref_ids]
f.writelines(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
__lowerCAmelCase = parser.parse_args()
main(args)
| 271 | 1 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
__lowerCAmelCase = logging.get_logger(__name__)
# General docstring
__lowerCAmelCase = """RegNetConfig"""
# Base docstring
__lowerCAmelCase = """facebook/regnet-y-040"""
__lowerCAmelCase = [1, 1_0_8_8, 7, 7]
# Image classification docstring
__lowerCAmelCase = """facebook/regnet-y-040"""
__lowerCAmelCase = """tabby, tabby cat"""
__lowerCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,_a : int ,_a : int ,_a : int = 3 ,_a : int = 1 ,_a : int = 1 ,_a : Optional[str] = "relu" ,):
'''simple docstring'''
super().__init__()
_a : Any = nn.Convad(
_a ,_a ,kernel_size=_a ,stride=_a ,padding=kernel_size // 2 ,groups=_a ,bias=_a ,)
_a : str = nn.BatchNormad(_a )
_a : Any = ACTaFN[activation] if activation is not None else nn.Identity()
def __lowercase ( self : List[str] ,_a : int ):
'''simple docstring'''
_a : Dict = self.convolution(_a )
_a : int = self.normalization(_a )
_a : int = self.activation(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,_a : RegNetConfig ):
'''simple docstring'''
super().__init__()
_a : str = RegNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
_a : Union[str, Any] = config.num_channels
def __lowercase ( self : List[Any] ,_a : int ):
'''simple docstring'''
_a : Optional[int] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                'Make sure that the channel dimension of the pixel values matches the one set in the configuration.' )
_a : Any = self.embedder(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ,_a : int ,_a : int ,_a : int = 2 ):
'''simple docstring'''
super().__init__()
_a : Dict = nn.Convad(_a ,_a ,kernel_size=1 ,stride=_a ,bias=_a )
_a : Dict = nn.BatchNormad(_a )
def __lowercase ( self : str ,_a : Tensor ):
'''simple docstring'''
_a : int = self.convolution(_a )
_a : Optional[Any] = self.normalization(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ,_a : int ,_a : int ):
'''simple docstring'''
super().__init__()
_a : str = nn.AdaptiveAvgPoolad((1, 1) )
_a : Union[str, Any] = nn.Sequential(
nn.Convad(_a ,_a ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_a ,_a ,kernel_size=1 ) ,nn.Sigmoid() ,)
def __lowercase ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
_a : Tuple = self.pooler(_a )
_a : Tuple = self.attention(_a )
_a : Optional[Any] = hidden_state * attention
return hidden_state
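# Editor's note: the layer above is a standard squeeze-and-excitation block. The
# global average pool squeezes each channel to 1x1, the two 1x1 convolutions with
# ReLU then sigmoid produce per-channel gates, and the input feature map is
# rescaled channel-wise by those gates.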
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,_a : RegNetConfig ,_a : int ,_a : int ,_a : int = 1 ):
'''simple docstring'''
super().__init__()
_a : str = in_channels != out_channels or stride != 1
_a : Tuple = max(1 ,out_channels // config.groups_width )
_a : Union[str, Any] = (
RegNetShortCut(_a ,_a ,stride=_a ) if should_apply_shortcut else nn.Identity()
)
_a : Tuple = nn.Sequential(
RegNetConvLayer(_a ,_a ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_a ,_a ,stride=_a ,groups=_a ,activation=config.hidden_act ) ,RegNetConvLayer(_a ,_a ,kernel_size=1 ,activation=_a ) ,)
_a : Optional[Any] = ACTaFN[config.hidden_act]
def __lowercase ( self : Any ,_a : int ):
'''simple docstring'''
_a : List[Any] = hidden_state
_a : int = self.layer(_a )
_a : Any = self.shortcut(_a )
hidden_state += residual
_a : int = self.activation(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Dict ,_a : RegNetConfig ,_a : int ,_a : int ,_a : int = 1 ):
'''simple docstring'''
super().__init__()
_a : Optional[int] = in_channels != out_channels or stride != 1
_a : int = max(1 ,out_channels // config.groups_width )
_a : Union[str, Any] = (
RegNetShortCut(_a ,_a ,stride=_a ) if should_apply_shortcut else nn.Identity()
)
_a : Optional[int] = nn.Sequential(
RegNetConvLayer(_a ,_a ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_a ,_a ,stride=_a ,groups=_a ,activation=config.hidden_act ) ,RegNetSELayer(_a ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_a ,_a ,kernel_size=1 ,activation=_a ) ,)
_a : List[Any] = ACTaFN[config.hidden_act]
def __lowercase ( self : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
_a : Any = hidden_state
_a : Tuple = self.layer(_a )
_a : int = self.shortcut(_a )
hidden_state += residual
_a : List[str] = self.activation(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] ,_a : RegNetConfig ,_a : int ,_a : int ,_a : int = 2 ,_a : int = 2 ,):
'''simple docstring'''
super().__init__()
_a : List[str] = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
_a : Optional[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_a ,_a ,_a ,stride=_a ,) ,*[layer(_a ,_a ,_a ) for _ in range(depth - 1 )] ,)
def __lowercase ( self : Any ,_a : Dict ):
'''simple docstring'''
_a : Optional[int] = self.layers(_a )
return hidden_state
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Any ,_a : RegNetConfig ):
'''simple docstring'''
super().__init__()
_a : Dict = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_a ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
_a : Dict = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_a ,config.depths[1:] ):
self.stages.append(RegNetStage(_a ,_a ,_a ,depth=_a ) )
def __lowercase ( self : str ,_a : Tensor ,_a : bool = False ,_a : bool = True ):
'''simple docstring'''
_a : List[str] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_a : Union[str, Any] = hidden_states + (hidden_state,)
_a : int = stage_module(_a )
if output_hidden_states:
_a : Any = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_a ,hidden_states=_a )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : int = RegNetConfig
__UpperCAmelCase : Any = '''regnet'''
__UpperCAmelCase : Optional[Any] = '''pixel_values'''
__UpperCAmelCase : List[str] = True
def __lowercase ( self : str ,_a : Optional[int] ):
'''simple docstring'''
if isinstance(_a ,nn.Convad ):
nn.init.kaiming_normal_(module.weight ,mode='fan_out' ,nonlinearity='relu' )
elif isinstance(_a ,(nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : List[str]=False ):
'''simple docstring'''
if isinstance(_a ,_a ):
_a : Optional[int] = value
__lowerCAmelCase = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__lowerCAmelCase = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''' , lowercase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : int ,_a : int ):
'''simple docstring'''
super().__init__(_a )
_a : Union[str, Any] = config
_a : Union[str, Any] = RegNetEmbeddings(_a )
_a : str = RegNetEncoder(_a )
_a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def __lowercase ( self : Union[str, Any] ,_a : Tensor ,_a : Optional[bool] = None ,_a : Optional[bool] = None ):
'''simple docstring'''
_a : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
_a : Optional[Any] = self.embedder(_a )
_a : Dict = self.encoder(
_a ,output_hidden_states=_a ,return_dict=_a )
_a : Tuple = encoder_outputs[0]
_a : Dict = self.pooler(_a )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_a ,pooler_output=_a ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , lowercase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : str ,_a : Optional[Any] ):
'''simple docstring'''
super().__init__(_a )
_a : Dict = config.num_labels
_a : List[Any] = RegNetModel(_a )
# classification head
_a : Optional[int] = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def __lowercase ( self : List[Any] ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[torch.LongTensor] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
_a : Any = return_dict if return_dict is not None else self.config.use_return_dict
_a : int = self.regnet(_a ,output_hidden_states=_a ,return_dict=_a )
_a : str = outputs.pooler_output if return_dict else outputs[1]
_a : Optional[Any] = self.classifier(_a )
_a : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a : Tuple = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a : Tuple = 'single_label_classification'
else:
_a : Optional[int] = 'multi_label_classification'
if self.config.problem_type == "regression":
_a : Tuple = MSELoss()
if self.num_labels == 1:
_a : Union[str, Any] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
_a : List[Any] = loss_fct(_a ,_a )
elif self.config.problem_type == "single_label_classification":
_a : Tuple = CrossEntropyLoss()
_a : List[str] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a : List[str] = BCEWithLogitsLoss()
_a : str = loss_fct(_a ,_a )
if not return_dict:
_a : int = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a ,logits=_a ,hidden_states=outputs.hidden_states )
| 271 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple ,*_a : List[str] ,**_a : Any ):
'''simple docstring'''
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
| 271 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Any = FunnelTokenizer
__UpperCAmelCase : List[str] = FunnelTokenizerFast
__UpperCAmelCase : Optional[Any] = True
__UpperCAmelCase : Tuple = True
def __lowercase ( self : Tuple ):
'''simple docstring'''
super().setUp()
_a : Any = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : int ,**_a : Dict ):
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : List[str] ,**_a : List[Any] ):
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname ,**_a )
def __lowercase ( self : Dict ,_a : str ):
'''simple docstring'''
_a : Union[str, Any] = 'UNwant\u00E9d,running'
_a : int = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_a ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
_a : str = tokenizer('UNwant\u00E9d,running' )
_a : Optional[int] = len(inputs['input_ids'] ) - 1
self.assertListEqual(inputs['token_type_ids'] ,[2] + [0] * sentence_len )
_a : Any = tokenizer('UNwant\u00E9d,running' ,'UNwant\u00E9d,running' )
self.assertListEqual(inputs['token_type_ids'] ,[2] + [0] * sentence_len + [1] * sentence_len )
| 271 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return choice(__a )
def UpperCAmelCase_ (__a : list[int] , __a : int ):
"""simple docstring"""
_a : Dict = random_pivot(__a )
# partition based on pivot
# linear time
_a : Optional[int] = [e for e in lst if e < pivot]
_a : List[str] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__a ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__a ) < k - 1:
return kth_number(__a , k - len(__a ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__a , __a )
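# Editor's note: expected O(n) time with a random pivot and O(n^2) in the worst
# case. The partition above keeps only elements strictly smaller or larger than
# the pivot, so the input is assumed to hold distinct values, e.g.
# kth_number([2, 1, 3, 5, 4], 3) == 3 (the 3rd smallest element).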
if __name__ == "__main__":
import doctest
doctest.testmod()
| 271 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
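# Expected output (editor's note): ten lines, from "5 * 1 = 5" through "5 * 10 = 50".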
| 271 |
'''simple docstring'''
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict ):
'''simple docstring'''
_a : Dict = {}
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(_a ,' -> ' ,' -> '.join([str(_a ) for j in self.vertex[i]] ) )
def __lowercase ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_a )
else:
# else make a new vertex
_a : int = [to_vertex]
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_a ,_a )
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : list ):
'''simple docstring'''
_a : List[Any] = True
print(_a ,end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_a ,_a )
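def dfs_iterative(graph: dict, start: int) -> list:
    # Editor's sketch, not part of the original class: the same traversal with an
    # explicit stack instead of recursion. `graph` is an adjacency dict shaped
    # like `Graph.vertex` above; `start` is assumed to be a valid vertex key.
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours reversed so they are visited in list order
        stack.extend(reversed(graph.get(node, [])))
    return order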
if __name__ == "__main__":
__lowerCAmelCase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 271 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : int ,*_a : Optional[int] ,**_a : str ):
'''simple docstring'''
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
| 271 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}


class BartphoTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(unk_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(unk_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
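# Hedged usage sketch (in practice the tokenizer is loaded through
# transformers; the repo id matches the vocab map above and sentencepiece
# must be installed):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]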
| 271 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
"""processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""VisionTextDualEncoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""FlaxVisionTextDualEncoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""TFVisionTextDualEncoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 271 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    """simple docstring"""

    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, 'feature_size'))
        self.assertTrue(hasattr(feat_extract, 'sampling_rate'))
        self.assertTrue(hasattr(feat_extract, 'padding_value'))
    def test_batch_feature(self):
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='np')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
    def test_batch_feature_pt(self):
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='pt')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
    def test_batch_feature_tf(self):
        '''simple docstring'''
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='tf')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def _check_padding(self, numpify=False):
'''simple docstring'''
        def _inputs_have_equal_length(input):
            length = len(input[0])
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_a : int = feat_extract.pad(_a ,padding=_a )
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(_a ,padding='longest' )
_a : Any = input_a[input_name]
_a : Optional[Any] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
_a : List[str] = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
_a : str = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' )[input_name]
_a : int = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,return_tensors='np' )
_a : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_a : Tuple = feat_extract.pad(_a ,pad_to_multiple_of=10 )
_a : List[str] = input_a[input_name]
_a : str = feat_extract.pad(_a ,padding='longest' ,pad_to_multiple_of=10 )
_a : Tuple = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a )
_a : Any = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a ,return_tensors='np' ,)
_a : Dict = input_a[input_name]
        self.assertTrue(all(len(x) % 10 == 0 for x in input_a))
self.assertTrue(_inputs_are_equal(_a ,_a ) )
        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_a))
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
    def _check_truncation(self, numpify=False):
'''simple docstring'''
        def _inputs_have_equal_length(input):
            length = len(input[0])
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
# truncate to smallest
_a : Union[str, Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=_a )
_a : str = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
_a : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to smallest with np
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=_a ,)
_a : Any = input_a[input_name]
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
_a : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to middle
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a ,return_tensors='np' ,)
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a )
_a : Tuple = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
_a : Dict = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' ,truncation=_a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,truncation=_a ,)
_a : Tuple = input_a[input_name]
_a : str = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,)
_a : List[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_a[0]) == expected_length)
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
    def test_padding_from_list(self):
        '''simple docstring'''
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        '''simple docstring'''
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        '''simple docstring'''
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        '''simple docstring'''
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding='longest', return_tensors='np')[input_name]
        input_pt = feat_extract.pad(processed_features, padding='longest', return_tensors='pt')[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
    def test_padding_accepts_tensors_tf(self):
        '''simple docstring'''
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding='longest', return_tensors='np')[input_name]
        input_tf = feat_extract.pad(processed_features, padding='longest', return_tensors='tf')[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        '''simple docstring'''
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lenghts = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        processed = feat_extract.pad(processed, padding='longest', return_tensors='np')
        self.assertIn('attention_mask', processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lenghts)
    def test_attention_mask_with_truncation(self):
        '''simple docstring'''
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lenghts = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lenghts)
        processed_pad = feat_extract.pad(
            processed, padding='max_length', max_length=max_length, truncation=True, return_tensors='np'
        )
        self.assertIn('attention_mask', processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
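# Hedged sketch (class and tester names below are hypothetical, not from this
# module): a concrete test file is expected to mix this class into
# unittest.TestCase and set the two class attributes, e.g.
#
# class MyFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
#     feature_extraction_class = MyFeatureExtractor
#
#     def setUp(self):
#         self.feat_extract_tester = MyFeatureExtractionTester(self)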
| 271 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]


class BankersAlgorithm:
    """simple docstring"""

    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        '''simple docstring'''
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        '''simple docstring'''
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        '''simple docstring'''
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        '''simple docstring'''
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        '''simple docstring'''
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        '''simple docstring'''
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('_' * 50 + '\n')
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print('The process is in a safe state.\n')
            else:
                print('System in unsafe state. Aborting...\n')
                break

    def __pretty_data(self):
        '''simple docstring'''
        print(' ' * 9 + 'Allocated Resource Table')
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n'
            )
        print(' ' * 9 + 'System Resource Table')
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + ' '.join(f"{it:>8}" for it in item)
                + '\n'
            )
        print(
            'Current Usage by Active Processes: '
            + ' '.join(str(x) for x in self.__claim_vector)
        )
        print(
            'Initial Available Resources: '
            + ' '.join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
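# Illustrative run with the module-level test data (mirrors the upstream
# doctest; `describe=True` also prints the resource tables first):
if __name__ == "__main__":
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)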
| 271 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """simple docstring"""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output['derivative'],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
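# Hedged usage sketch (not part of this module; in practice one imports
# KarrasVePipeline from diffusers rather than running this file directly).
# The checkpoint id is illustrative of a score-based UNet compatible with
# the Karras et al. sigma schedule:
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   scheduler = KarrasVeScheduler()
#   pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
#   image = pipe(num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")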
| 271 | 1 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    """simple docstring"""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """simple docstring"""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """simple docstring"""
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """simple docstring"""

    def __init__(self) -> None:
        '''simple docstring'''
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        '''simple docstring'''
        return self.elements

    def __repr__(self) -> str:
        '''simple docstring'''
        return str(self.heap)

    def is_empty(self) -> bool:
        '''simple docstring'''
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        '''simple docstring'''
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        '''simple docstring'''
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        '''simple docstring'''
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        '''simple docstring'''
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        '''simple docstring'''
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        '''simple docstring'''
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """simple docstring"""

    def __init__(self) -> None:
        '''simple docstring'''
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        '''simple docstring'''
        return str(self.connections)

    def __len__(self) -> int:
        '''simple docstring'''
        return self.nodes

    def add_node(self, node: T) -> None:
        '''simple docstring'''
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        '''simple docstring'''
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """simple docstring"""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
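if __name__ == "__main__":
    # Small demo (not from the upstream module): spanning distances for a
    # four-vertex weighted graph.
    demo_graph = GraphUndirectedWeighted()
    demo_graph.add_edge("a", "b", 3)
    demo_graph.add_edge("b", "c", 10)
    demo_graph.add_edge("c", "a", 5)
    demo_graph.add_edge("a", "d", 100)
    dist, parent = prims_algo(demo_graph)
    print(dist)    # {'a': 0, 'b': 3, 'c': 5, 'd': 100}
    print(parent)  # {'a': None, 'b': 'a', 'c': 'a', 'd': 'a'}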
| 271 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    """simple docstring"""
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    """simple docstring"""
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name: Union[str, os.PathLike]):
    """simple docstring"""
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    """simple docstring"""
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    """simple docstring"""
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports


def check_imports(filename):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )
    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    """simple docstring"""
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    """simple docstring"""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls
    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """simple docstring"""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """simple docstring"""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
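# Hedged usage sketch (repo id is illustrative): these helpers back
# `custom_pipeline` loading in diffusers; resolving a community pipeline
# class directly would look roughly like:
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "hf-internal-testing/diffusers-dummy-pipeline", "pipeline.py"
#   )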
| 271 | 1 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
__lowerCAmelCase = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
__lowerCAmelCase = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
__lowerCAmelCase = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
__lowerCAmelCase = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
__lowerCAmelCase = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
__lowerCAmelCase = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
__lowerCAmelCase = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
__lowerCAmelCase = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
__lowerCAmelCase = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
__lowerCAmelCase = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
__lowerCAmelCase = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
__lowerCAmelCase = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
__lowerCAmelCase = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
__lowerCAmelCase = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
__lowerCAmelCase = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"Started running {mod.modelId}!!!")
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 271 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """simple docstring"""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
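# Worked example: the points below lie on y = x + 5, so interpolating at
# x0 = 5 returns 10.0 (first element) plus the full Neville table.
if __name__ == "__main__":
    value, table = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
    print(value)  # 10.0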
| 271 | 1 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
| 271 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """simple docstring"""

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_cli.py'])
    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'
    test_config_path = Path('tests/test_configs')

    @classmethod
    def setUpClass(cls):
        '''simple docstring'''
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        '''simple docstring'''
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        '''simple docstring'''
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        '''simple docstring'''
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        '''simple docstring'''
        execute_subprocess_async(['accelerate', 'test'], env=os.environ.copy())


class TpuConfigTester(unittest.TestCase):
    """simple docstring"""

    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'

    def test_base(self):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_base_backward_compatibility(self):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command',
                self.command,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file(self):
        '''simple docstring'''
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'], return_stdout=True
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command(self):
        '''simple docstring'''
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )

    def test_with_config_file_and_multiple_command(self):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--command',
                self.command,
                '--command',
                'echo "Hello World"',
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file(self):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_with_config_file_and_command_file_backward_compatibility(self):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/0_12_0.yaml',
                '--command_file',
                self.command_file,
                '--tpu_zone',
                self.tpu_zone,
                '--tpu_name',
                self.tpu_name,
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install(self):
        '''simple docstring'''
        output = run_command(
            self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )

    def test_accelerate_install_version(self):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '--config_file',
                'tests/test_configs/latest.yaml',
                '--install_accelerate',
                '--accelerate_version',
                '12.0.0',
                '--debug',
            ],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all",
            output,
        )
| 271 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    """simple docstring"""
    parameter_file = os.path.join(args.tf_model_dir, 'parameters.json')
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith('.pt'):
        args.output = args.output + '.pt'
    new_state = OrderedDict()
    with tf.device('/CPU:0'):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float32)
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_a : int = int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_a : Tuple = 8
_a : Tuple = 'model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_a : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a : Union[str, Any] = torch.tensor(__a )
elif key_name.startswith('model/moe' ):
_a : str = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_a : Union[str, Any] = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_a : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a : List[str] = torch.tensor(__a )
elif key_name.endswith('/softmlp/kernel' ):
_a : List[str] = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_a : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a : str = torch.tensor(__a )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_a : Union[str, Any] = key_name[-9:-7]
for i in range(1_6 ):
_a : str = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_a : List[Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_a : str = torch.tensor(__a )
elif key_name.startswith('model/mlp' ):
_a : Optional[Any] = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_a : List[Any] = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
_a : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a : Tuple = torch.tensor(__a )
elif key_name.endswith('/p1/bias' ):
_a : List[str] = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
_a : Optional[int] = vnp.copy() # same because it is one dimensional
_a : int = torch.tensor(__a )
elif key_name.endswith('/p2/kernel' ):
_a : str = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
_a : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a : Union[str, Any] = torch.tensor(__a )
elif key_name.endswith('/p2/bias' ):
_a : Union[str, Any] = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
_a : Optional[int] = vnp.copy() # same because it is one dimensional
_a : int = torch.tensor(__a )
elif key_name.startswith('model/ln' ):
_a : List[Any] = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_a : List[Any] = 'model.blocks.%d.feed_forward.norm.bias' % player
_a : Tuple = vnp.copy() # same because it is one dimensional
_a : Union[str, Any] = torch.tensor(__a )
elif key_name.endswith('/g' ):
_a : str = 'model.blocks.%d.feed_forward.norm.weight' % player
_a : Dict = vnp.copy() # same because it is one dimensional
_a : str = torch.tensor(__a )
elif key_name.startswith('model/att' ):
_a : List[Any] = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_a : Dict = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_a : str = state[:, 0, :, :]
_a : Optional[Any] = state[:, 1, :, :]
_a : List[str] = state[:, 2, :, :]
_a : Dict = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_a : Dict = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_a : Union[str, Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_a : List[Any] = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_a : Any = torch.tensor(__a )
_a : Optional[int] = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_a : str = torch.tensor(__a )
_a : Optional[int] = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_a : List[str] = torch.tensor(__a )
elif key_name.endswith('/o/kernel' ):
_a : List[Any] = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_a : Any = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_a : List[str] = torch.tensor(__a )
elif key_name.startswith('model/an' ):
player = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_a : Optional[int] = 'model.blocks.%d.self_attn.norm.bias' % player
_a : Optional[int] = vnp.copy() # same because it is one dimensional
_a : Union[str, Any] = torch.tensor(__a )
elif key_name.endswith('/g' ):
_a : int = 'model.blocks.%d.self_attn.norm.weight' % player
_a : Optional[Any] = vnp.copy() # same because it is one dimensional
_a : Optional[int] = torch.tensor(__a )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
nlayer = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
    key_name[-3:]
]
_a : List[str] = 'model.%s.weight' % nlayer
_a : Any = vnp.copy() # same in embedded
_a : int = torch.tensor(__a )
if key_name.startswith('model/wte' ):
_a : Tuple = 'lm_head.weight'
_a : List[Any] = vnp.copy() # same in embedded
_a : int = torch.tensor(__a )
elif key_name.startswith('model/wob' ):
_a : Union[str, Any] = 'final_logits_bias'
state = vnp.copy()  # same in embedded
state = state.reshape((1, -1) )
_a : List[Any] = torch.tensor(__a )
elif key_name == "model/dense/kernel":
_a : Optional[Any] = 'model.last_project.weight'
_a : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_a : Any = torch.tensor(__a )
elif key_name == "model/dense_1/bias":
_a : Union[str, Any] = 'model.last_project.bias'
_a : Tuple = vnp.copy() # same because it is one dimensional
_a : str = torch.tensor(__a )
torch.save(__a , args.output )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
__lowerCAmelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
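# Standalone sketch of the fused-qkv split performed above: a (hidden, 3, heads, head_dim)
# Mesh-TensorFlow kernel is sliced into q/k/v and flattened to (heads * head_dim, hidden)
# for PyTorch's Linear layout. The shapes below are illustrative assumptions, not values
# read from a real checkpoint.
import numpy as np

fused = np.random.randn(8, 3, 2, 4)  # (hidden, qkv, heads, head_dim)
q = fused[:, 0, :, :]                # take the query slice
q = q.reshape([q.shape[0], q.shape[1] * q.shape[2]]).transpose([1, 0]).copy()
assert q.shape == (2 * 4, 8)         # (heads * head_dim, hidden)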
| 271 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__lowerCAmelCase = TypeVar("""T""")
class Node ( Generic[T] ):
    """simple docstring"""
    def __init__( self : Tuple ,_a : T ):
        '''simple docstring'''
        self.data = _a
        self.next : Node[T] | None = None
def __str__( self : Dict ):
'''simple docstring'''
return F"""{self.data}"""
class Stack ( Generic[T] ):
    """simple docstring"""
    def __init__( self : Optional[int] ):
        '''simple docstring'''
        self.top : Node[T] | None = None
    def __iter__( self : str ):
        '''simple docstring'''
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self : str ):
        '''simple docstring'''
        return "->".join([str(item ) for item in self] )
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(tuple(iter(self ) ) )
    def is_empty( self : str ):
        '''simple docstring'''
        return self.top is None
    def push( self : List[Any] ,_a : T ):
        '''simple docstring'''
        node = Node(_a )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self : Tuple ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top ,Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self : List[str] ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data
    def clear( self : List[str] ):
        '''simple docstring'''
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
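    # Quick usage sketch for the stack above (method names as restored in this file):
    #     s = Stack[int]()
    #     s.push(1); s.push(2)
    #     assert s.peek() == 2 and len(s) == 2
    #     assert s.pop() == 2 and str(s) == "1"
    #     s.clear(); assert s.is_empty()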
| 271 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
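# The try/except blocks above follow a standard optional-backend pattern: probe each
# backend and only register the heavy modeling modules when it is importable. A minimal
# standalone sketch of the same idea (names are illustrative, not part of the library API):
import importlib.util

def _backend_available(package_name: str) -> bool:
    # find_spec checks importability without actually importing the package
    return importlib.util.find_spec(package_name) is not None

_sketch_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
if _backend_available("torch"):
    _sketch_import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]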
| 271 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : int = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
unet = self.dummy_uncond_unet
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet ,scheduler=scheduler )
pndm.to(torch_device )
pndm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = pndm(generator=generator ,num_inference_steps=20 ,output_type='numpy' ).images
generator = torch.manual_seed(0 )
image_from_tuple = pndm(generator=generator ,num_inference_steps=20 ,output_type='numpy' ,return_dict=False )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Tuple ):
'''simple docstring'''
model_id = 'google/ddpm-cifar10-32'
unet = UNetaDModel.from_pretrained(model_id )
scheduler = PNDMScheduler()
pndm = PNDMPipeline(unet=unet ,scheduler=scheduler )
pndm.to(torch_device )
pndm.set_progress_bar_config(disable=None )
generator = torch.manual_seed(0 )
image = pndm(generator=generator ,output_type='numpy' ).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
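        # Outside a test harness, the same pipeline is typically driven as in this sketch
        # (checkpoint name taken from the slow test above; keyword arguments mirror it):
        #     unet = UNetaDModel.from_pretrained('google/ddpm-cifar10-32')
        #     pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
        #     image = pndm(generator=torch.manual_seed(0), output_type='numpy').images[0]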
| 271 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Tuple ):
'''simple docstring'''
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor( self : List[str] ,**_a : int ):
        '''simple docstring'''
        return TvltImageProcessor.from_pretrained(self.checkpoint ,**_a )
    def get_feature_extractor( self : List[str] ,**_a : Optional[Any] ):
        '''simple docstring'''
        return TvltFeatureExtractor.from_pretrained(self.checkpoint ,**_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor ,TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor ,TvltImageProcessor )
def __lowercase ( self : Tuple ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        audio_dict = feature_extractor(audio ,return_tensors='np' )
        input_processor = processor(audio=audio ,return_tensors='np' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def __lowercase ( self : str ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
        image = np.ones([3, 224, 224] )
        image_dict = image_processor(image ,return_tensors='np' )
        input_processor = processor(images=image ,return_tensors='np' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        image = np.ones([3, 224, 224] )
        inputs = processor(audio=audio ,images=image )
self.assertListEqual(list(inputs.keys() ) ,['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor ,feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names ,image_processor.model_input_names + feature_extractor.model_input_names ,msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' ,)
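        # The joint audio+image call exercised above, condensed into a sketch:
        #     processor = TvltProcessor(image_processor=..., feature_extractor=...)
        #     inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
        #     # -> dict with keys: audio_values, audio_mask, pixel_values, pixel_mask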
| 271 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def analyze_directory( self : str ,directory : Path ,identifier : Union[str, None] = None ,ignore_files : Union[List[str], None] = None ,n_identifier : Union[str, List[str], None] = None ,only_modules : bool = True ,):
        '''simple docstring'''
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory ,file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier ,list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' ,file )
            if only_modules:
                module_identifier = file.split('.' )[0]
                try:
                    module = getattr(transformers ,module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) ,0 )
                except AttributeError:
                    logger.info(F"""{module_identifier} is not a module.""" )
            else:
                result = doctest.testfile(str(Path('..' ) / directory / file ) ,optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed ,0 )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : int = Path('src/transformers' )
_a : List[Any] = 'modeling'
_a : Optional[Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_a ,identifier=_a ,ignore_files=_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'tokenization'
self.analyze_directory(_a ,identifier=_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Dict = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_a ,identifier=_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = Path('src/transformers' )
_a : List[Any] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_a ,n_identifier=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_a ,ignore_files=_a ,only_modules=_a )
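        # Standalone sketch of the only_modules branch above: collect the doctests that live
        # in an importable module and run them through unittest (module name is illustrative;
        # DocTestSuite may raise ValueError on modules without doctests, depending on the
        # Python version):
        #     import doctest, unittest
        #     suite = doctest.DocTestSuite(some_module)
        #     result = unittest.TextTestRunner().run(suite)
        #     assert len(result.failures) == 0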
| 271 | 1 |
'''simple docstring'''
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 271 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param (torch_layer : Optional[Any] , weight : str , bias : Optional[Any]=None ):
    """simple docstring"""
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh (weights : int , torch_layer : Optional[Any] , hidden_size : int ):
    """simple docstring"""
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local (weights : Optional[Any] , torch_layer : Optional[int] , hidden_size : List[str] ):
    """simple docstring"""
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch (weights : Any , torch_block : Any , hidden_size : Optional[Any] ):
    """simple docstring"""
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch (weights : Dict , torch_model : Dict , hidden_size : List[Any] ):
    """simple docstring"""
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"""{position_embeddings[emb_idx]} emb does not match"""
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch (trax_model_pkl_path : Tuple , config_file : Optional[Any] , pytorch_dump_path : Dict ):
    """simple docstring"""
    config = ReformerConfig.from_json_file(config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , 'rb' ) as f:
        model_weights = pickle.load(f )['weights']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
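    # Usage sketch (the flags are the ones declared above; the paths and the script
    # filename are hypothetical):
    #     python convert_reformer_trax_checkpoint_to_pytorch.py \
    #         --trax_model_pkl_path ./model.pkl \
    #         --config_file ./reformer_config.json \
    #         --pytorch_dump_path ./pytorch_model.bin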
| 271 | 1 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__lowerCAmelCase = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results (test_results : Optional[int] ):
    """simple docstring"""
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
return failed, success, time_spent
def extract_first_line_failure (failures_short_lines : Any ):
    """simple docstring"""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(R'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message :
    """simple docstring"""
    def __init__( self : Tuple ,title : str ,doc_test_results : Dict ):
        '''simple docstring'''
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',' )[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time( self : str ):
        '''simple docstring'''
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':' )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return F"""{int(hours )}h{int(minutes )}m{int(seconds )}s"""
@property
    def header( self : Any ):
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self : List[str] ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def failures( self : int ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def category_failures( self : Tuple ):
'''simple docstring'''
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v ,dict )}
        report = ''
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
    def payload( self : Any ):
'''simple docstring'''
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_a )
@staticmethod
def __lowercase ( ):
'''simple docstring'''
_a : Optional[int] = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
]
print('Sending the following payload' )
print(json.dumps({'blocks': json.loads(_a )} ) )
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text='There was an issue running the tests.' ,blocks=_a ,)
    def post( self : Union[str, Any] ):
        '''simple docstring'''
        print('Sending the following payload' )
        print(json.dumps({'blocks': json.loads(self.payload )} ) )
        text = F"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,blocks=self.payload ,text=text ,)
    def get_reply_blocks( self : Dict ,job_name : List[Any] ,job_link : Any ,failures : List[Any] ,text : Tuple ):
        '''simple docstring'''
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value ) > 250 else value
            failures_text += F"""*{key}*\n_{value}_\n\n"""
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply( self : Any ):
        '''simple docstring'''
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.' )
        job_link = self.doc_test_results.pop('job_link' )
        self.doc_test_results.pop('failures' )
        self.doc_test_results.pop('success' )
        self.doc_test_results.pop('time_spent' )
        sorted_dict = sorted(self.doc_test_results.items() ,key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result['failures'] ):
                text = F"""*Num failures* :{len(job_result['failed'] )} \n"""
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job ,job_link ,failures ,text=text )
                print('Sending the following reply' )
                print(json.dumps({'blocks': blocks} ) )
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] ,text=F"""Results for {job}""" ,blocks=blocks ,thread_ts=self.thread_ts['ts'] ,)
time.sleep(1 )
def get_job_links ():
    """simple docstring"""
    run_id = os.environ['GITHUB_RUN_ID']
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" ).json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.' , e )
    return {}
def retrieve_artifact (name : str ):
    """simple docstring"""
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding='utf-8' ) as f:
                    _artifact[file.split('.' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"""Could not open {os.path.join(name , file )}.""" ) from e
    return _artifact
def retrieve_available_artifacts ():
    """simple docstring"""
    class Artifact :
        """simple docstring"""
        def __init__( self : Optional[Any] ,name : str ):
            '''simple docstring'''
            self.name = name
            self.paths = []
        def __str__( self : Tuple ):
            '''simple docstring'''
            return self.name
        def add_path( self : Dict ,path : str ):
            '''simple docstring'''
            self.paths.append({'name': self.name, 'path': path} )
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )
    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }
    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")
    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "
        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")
                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
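    # handle_test_results() above consumes the tail of a pytest summary line. Worked
    # example (the input string is illustrative):
    #     "=== 2 failed, 10 passed in 3.50s ===" -> failed=2, success=10, time_spent="3.50s"
    # because each count precedes its "failed"/"passed" keyword and, when the line ends
    # with a run of "=" characters, the time is the second-to-last whitespace-separated token.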
| 271 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Any = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Union[str, Any] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=3 ,)
return model
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model
        ldm = LDMPipeline(unet=unet ,vqvae=vae ,scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=2 ,output_type='numpy' ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = ldm(generator=generator ,num_inference_steps=2 ,output_type='numpy' ,return_dict=False )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ldm(generator=generator ,num_inference_steps=5 ,output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
        tolerance = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
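        # The unconditional sampling pattern above, outside a test harness (sketch):
        #     ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        #     image = ldm(generator=torch.manual_seed(0), num_inference_steps=5,
        #                 output_type='numpy').images[0]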
| 271 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class BlipaVisionConfig ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = '''blip_2_vision_model'''
    def __init__( self : Tuple ,hidden_size : int=1408 ,intermediate_size : str=6144 ,num_hidden_layers : Union[str, Any]=39 ,num_attention_heads : List[Any]=16 ,image_size : int=224 ,patch_size : str=14 ,hidden_act : Optional[int]="gelu" ,layer_norm_eps : List[Any]=0.0_0001 ,attention_dropout : Any=0.0 ,initializer_range : Any=1E-10 ,qkv_bias : int=True ,**kwargs : Union[str, Any] ,):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
def __lowercase ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : Optional[Any] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
        config_dict, kwargs = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_a : List[Any] = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict ,**kwargs )
class BlipaQFormerConfig ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = '''blip_2_qformer'''
    def __init__( self : int ,vocab_size : Any=3_0522 ,hidden_size : Any=768 ,num_hidden_layers : Optional[Any]=12 ,num_attention_heads : Union[str, Any]=12 ,intermediate_size : Union[str, Any]=3072 ,hidden_act : Dict="gelu" ,hidden_dropout_prob : Optional[int]=0.1 ,attention_probs_dropout_prob : Dict=0.1 ,max_position_embeddings : Optional[Any]=512 ,initializer_range : Union[str, Any]=0.02 ,layer_norm_eps : Optional[int]=1E-12 ,pad_token_id : Any=0 ,position_embedding_type : Dict="absolute" ,cross_attention_frequency : Tuple=2 ,encoder_hidden_size : Tuple=1408 ,**kwargs : Dict ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
def __lowercase ( cls : Tuple ,_a : Union[str, os.PathLike] ,**_a : Any ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
        config_dict, kwargs = cls.get_config_dict(_a ,**_a )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_a : Dict = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict ,**kwargs )
class BlipaConfig ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = '''blip-2'''
__UpperCAmelCase : Dict = True
    def __init__( self : List[str] ,vision_config : str=None ,qformer_config : Optional[Any]=None ,text_config : Tuple=None ,num_query_tokens : Optional[int]=32 ,**kwargs : Union[str, Any] ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        self.vision_config = BlipaVisionConfig(**vision_config )
        self.qformer_config = BlipaQFormerConfig(**qformer_config )
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs( cls : int ,vision_config : BlipaVisionConfig ,qformer_config : BlipaQFormerConfig ,text_config : PretrainedConfig ,**kwargs : Tuple ,):
        '''simple docstring'''
        return cls(
            vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**kwargs ,)
    def to_dict( self : List[str] ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
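        # Composition sketch for the classes above (class names as restored in this file;
        # the classmethod is the one declared right before to_dict):
        #     vision = BlipaVisionConfig()
        #     qformer = BlipaQFormerConfig()
        #     text = CONFIG_MAPPING['opt']()
        #     blip2 = BlipaConfig.from_vision_qformer_text_configs(vision, qformer, text)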
| 271 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : int ,*_a : Optional[int] ,**_a : str ):
'''simple docstring'''
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
| 271 | 1 |
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ('foo.json',)] )
def __lowercase ( self : Optional[Any] ,_a : str ):
'''simple docstring'''
        config = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_a ,config_name=_a )
            loaded_config = GenerationConfig.from_pretrained(_a ,config_name=_a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample ,_a )
self.assertEqual(loaded_config.temperature ,0.7 )
self.assertEqual(loaded_config.length_penalty ,1.0 )
self.assertEqual(loaded_config.bad_words_ids ,[[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k ,50 )
self.assertEqual(loaded_config.max_length ,20 )
self.assertEqual(loaded_config.max_time ,_a )
def __lowercase ( self : int ):
'''simple docstring'''
        model_config = AutoConfig.from_pretrained('gpt2' )
        generation_config_from_model = GenerationConfig.from_model_config(model_config )
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model ,default_generation_config )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id ,default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id ,model_config.eos_token_id )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
        generation_config = GenerationConfig()
_a : Dict = {
'max_new_tokens': 1024,
'foo': 'bar',
}
_a : Optional[int] = copy.deepcopy(_a )
_a : List[str] = generation_config.update(**_a )
# update_kwargs was not modified (no side effects)
self.assertEqual(_a ,_a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens ,1024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(_a ,{'foo': 'bar'} )
def __lowercase ( self : Dict ):
'''simple docstring'''
        generation_config = GenerationConfig()
        generation_config.foo = 'bar'
        with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
            generation_config.save_pretrained(tmp_dir )
            new_config = GenerationConfig.from_pretrained(tmp_dir )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo ,'bar' )
_a : List[str] = GenerationConfig.from_model_config(_a )
assert not hasattr(_a ,'foo' ) # no new kwargs should be initialized if from config
def __lowercase ( self : List[Any] ):
'''simple docstring'''
        default_config = GenerationConfig()
self.assertEqual(default_config.temperature ,1.0 )
self.assertEqual(default_config.do_sample ,_a )
self.assertEqual(default_config.num_beams ,1 )
        config = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,bad_words_ids=[[1, 2, 3], [4, 5]] ,)
self.assertEqual(config.temperature ,0.7 )
self.assertEqual(config.do_sample ,_a )
self.assertEqual(config.num_beams ,1 )
with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir ,temperature=1.0 )
self.assertEqual(loaded_config.temperature ,1.0 )
self.assertEqual(loaded_config.do_sample ,_a )
self.assertEqual(loaded_config.num_beams ,1 ) # default value
@is_staging_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __lowercase ( cls : str ):
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def __lowercase ( cls : Optional[int] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def __lowercase ( self : str ):
'''simple docstring'''
        config = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('test-generation-config' ,use_auth_token=self._token )
_a : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='test-generation-config' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Optional[int] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
        config = GenerationConfig(
do_sample=_a ,temperature=0.7 ,length_penalty=1.0 ,)
config.push_to_hub('valid_org/test-generation-config-org' ,use_auth_token=self._token )
_a : Union[str, Any] = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
# Reset repo
delete_repo(token=self._token ,repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_a ,repo_id='valid_org/test-generation-config-org' ,push_to_hub=_a ,use_auth_token=self._token )
_a : Tuple = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_a ,getattr(_a ,_a ) )
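        # Typical non-test round trip for GenerationConfig, as a sketch:
        #     cfg = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
        #     cfg.save_pretrained('./gen-cfg')    # writes generation_config.json
        #     assert GenerationConfig.from_pretrained('./gen-cfg').temperature == 0.7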
| 271 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : Optional[Any]=None ,_a : Dict=None ,*_a : int ,**_a : str ):
'''simple docstring'''
super().__init__(*_a ,**_a )
if config is None:
assert isinstance(self.model ,_a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config ,FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
' padding..' )
if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self : List[str] ,num_training_steps : int ):
        '''simple docstring'''
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    'weight_decay': 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters ,optim=optimizer_cls ,**optimizer_kwargs ,)
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters ,**optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
    def _get_lr_scheduler( self : List[Any] ,num_training_steps : List[Any] ):
        '''simple docstring'''
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=num_training_steps )
return scheduler
def __lowercase ( self : Tuple ):
'''simple docstring'''
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self : Dict ,model : Dict ,inputs : Any ,labels : Dict ):
        '''simple docstring'''
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs ,use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss, logits = model(**inputs ,labels=labels ,use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs ,use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits ,dim=-1 )
            loss, _ = self.loss_fn(lprobs ,labels ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self : Optional[int] ,model : Union[str, Any] ,inputs : List[Any] ):
        '''simple docstring'''
        labels = inputs.pop('labels' )
        loss, _ = self._compute_loss(model ,inputs ,labels )
return loss
    def prediction_step( self : Optional[Any] ,model : nn.Module ,inputs : Dict[str, Union[torch.Tensor, Any]] ,prediction_loss_only : bool ,ignore_keys : Optional[List[str]] = None ,):
        '''simple docstring'''
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**gen_kwargs ,)
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens ,gen_kwargs['max_length'] )
        labels = inputs.pop('labels' )
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model ,inputs ,labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels ,gen_kwargs['max_length'] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self : str ,tensor : Tuple ,max_length : Tuple ):
        '''simple docstring'''
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                F""" padded to `max_length`={max_length}""" )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
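        # Toy sketch of _pad_tensors_to_max_len above (standalone; pad_token_id assumed 0):
        #     t = torch.tensor([[5, 6, 7]])
        #     padded = 0 * torch.ones((t.shape[0], 5), dtype=t.dtype)
        #     padded[:, : t.shape[-1]] = t    # -> tensor([[5, 6, 7, 0, 0]])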
| 271 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = """▁"""
__lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""facebook/mbart-large-50-one-to-many-mmt""": (
"""https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"""
),
}
}
__lowerCAmelCase = {
"""facebook/mbart-large-50-one-to-many-mmt""": 1_0_2_4,
}
# fmt: off
__lowerCAmelCase = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN""", """af_ZA""", """az_AZ""", """bn_IN""", """fa_IR""", """he_IL""", """hr_HR""", """id_ID""", """ka_GE""", """km_KH""", """mk_MK""", """ml_IN""", """mn_MN""", """mr_IN""", """pl_PL""", """ps_AF""", """pt_XX""", """sv_SE""", """sw_KE""", """ta_IN""", """te_IN""", """th_TH""", """tl_XX""", """uk_UA""", """ur_PK""", """xh_ZA""", """gl_ES""", """sl_SI"""]
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : int = ['''input_ids''', '''attention_mask''']
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
    def __init__( self : Any ,vocab_file : int ,src_lang : Union[str, Any]=None ,tgt_lang : Optional[int]=None ,eos_token : int="</s>" ,sep_token : Optional[Any]="</s>" ,cls_token : Dict="<s>" ,unk_token : List[Any]="<unk>" ,pad_token : Tuple="<pad>" ,mask_token : List[Any]="<mask>" ,sp_model_kwargs : Optional[Dict[str, Any]] = None ,**kwargs : List[str] ,):
        '''simple docstring'''
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get('additional_special_tokens' ,[] )
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang ,tgt_lang=tgt_lang ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,cls_token=cls_token ,pad_token=pad_token ,mask_token=mask_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model )
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES )
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowercase ( self : int ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __lowercase ( self : Any ,_a : str ):
'''simple docstring'''
_a : Optional[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Dict ):
'''simple docstring'''
_a : Union[str, Any] = self.__dict__.copy()
_a : int = None
return state
def __setstate__( self : int ,_a : Dict ):
'''simple docstring'''
_a : int = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : Dict = {}
_a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Dict = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : str ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def __lowercase ( self : str ,_a : str ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_a : Tuple = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowercase ( self : Optional[int] ,_a : int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowercase ( self : Dict ,_a : str ):
'''simple docstring'''
_a : Union[str, Any] = []
_a : Dict = ''
_a : Any = False
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
_a : Any = True
_a : int = []
else:
current_sub_tokens.append(_a )
_a : int = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __lowercase ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : List[Any] = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,'wb' ) as fi:
_a : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
_a : Optional[Any] = [1] * len(self.prefix_tokens )
_a : List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_a )) + suffix_ones
return prefix_ones + ([0] * len(_a )) + ([0] * len(_a )) + suffix_ones
def __lowercase ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowercase ( self : Union[str, Any] ,_a : Optional[Any] ,_a : str ,_a : Optional[str] ,_a : Optional[str] ,**_a : str ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_a : str = src_lang
_a : Union[str, Any] = self(_a ,add_special_tokens=_a ,return_tensors=_a ,**_a )
_a : Any = self.convert_tokens_to_ids(_a )
_a : int = tgt_lang_id
return inputs
def __lowercase ( self : Optional[Any] ,_a : List[str] ,_a : str = "en_XX" ,_a : Optional[List[str]] = None ,_a : str = "ro_RO" ,**_a : str ,):
'''simple docstring'''
_a : Dict = src_lang
_a : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(_a ,_a ,**_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def __lowercase ( self : Dict ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowercase ( self : Optional[int] ,_a : str ):
'''simple docstring'''
_a : str = self.lang_code_to_id[src_lang]
_a : List[Any] = [self.cur_lang_code_id]
_a : Optional[int] = [self.eos_token_id]
def __lowercase ( self : Optional[int] ,_a : str ):
'''simple docstring'''
_a : int = self.lang_code_to_id[tgt_lang]
_a : Optional[Any] = [self.cur_lang_code_id]
_a : str = [self.eos_token_id]
| 271 |
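# A minimal, hypothetical sketch of the fairseq/SentencePiece id alignment used
# in the tokenizer above: the first four ids are pinned to fairseq's special
# tokens, every other SentencePiece id is shifted by a fixed offset, and pieces
# that SentencePiece cannot resolve (it returns 0) fall back to <unk>. Names
# here are illustrative, not the tokenizer's real API.
FAIRSEQ_OFFSET = 1
FAIRSEQ_TOKENS_TO_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

def token_to_id(sp_model, token):
    # Special tokens bypass the SentencePiece model entirely.
    if token in FAIRSEQ_TOKENS_TO_IDS:
        return FAIRSEQ_TOKENS_TO_IDS[token]
    spm_id = sp_model.PieceToId(token)
    # SentencePiece returns 0 for unknown pieces -> map to the fairseq <unk> id.
    return spm_id + FAIRSEQ_OFFSET if spm_id else FAIRSEQ_TOKENS_TO_IDS["<unk>"]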
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCAmelCase = re.compile(r"""\s+""")
def UpperCAmelCase_ (__a : Any ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__a , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[str] = [len(__a ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__a ), "line_max": max(__a )}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Union[str, Any] = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def UpperCAmelCase_ (__a : Optional[int] , __a : Any ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def UpperCAmelCase_ (__a : int , __a : Union[str, Any]=5 ):
"""simple docstring"""
_a : Optional[int] = ['auto-generated', 'autogenerated', 'automatically generated']
_a : List[str] = example['content'].splitlines()
for _, line in zip(range(__a ) , __a ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCAmelCase_ (__a : List[str] , __a : Dict=5 , __a : Tuple=0.05 ):
"""simple docstring"""
_a : Optional[int] = ['unit tests', 'test file', 'configuration file']
_a : int = example['content'].splitlines()
_a : int = 0
_a : Dict = 0
# first test
for _, line in zip(range(__a ) , __a ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_a : int = example['content'].count('\n' )
_a : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCAmelCase_ (__a : Optional[int] ):
"""simple docstring"""
_a : List[str] = ['def ', 'class ', 'for ', 'while ']
_a : str = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCAmelCase_ (__a : int , __a : Any=4 ):
"""simple docstring"""
_a : List[str] = example['content'].splitlines()
_a : Dict = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Optional[Any] = tokenizer(example['content'] , truncation=__a )['input_ids']
_a : Optional[int] = len(example['content'] ) / len(__a )
return {"ratio": ratio}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = {}
results.update(get_hash(__a ) )
results.update(line_stats(__a ) )
results.update(alpha_stats(__a ) )
results.update(char_token_ratio(__a ) )
results.update(is_autogenerated(__a ) )
results.update(is_config_or_test(__a ) )
results.update(has_no_keywords(__a ) )
results.update(has_few_assignments(__a ) )
return results
def UpperCAmelCase_ (__a : Any , __a : Any , __a : str ):
"""simple docstring"""
if not check_uniques(__a , __a ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCAmelCase_ (__a : Union[str, Any] ):
"""simple docstring"""
with open(__a , 'rb' ) as f_in:
with gzip.open(str(__a ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__a , __a )
os.unlink(__a )
# Settings
__lowerCAmelCase = HfArgumentParser(PreprocessingArguments)
__lowerCAmelCase = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase = multiprocessing.cpu_count()
__lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCAmelCase = time.time()
__lowerCAmelCase = load_dataset(args.dataset_name, split="""train""")
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCAmelCase = time.time()
__lowerCAmelCase = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCAmelCase = set(ds.unique("""hash"""))
__lowerCAmelCase = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCAmelCase = time.time()
__lowerCAmelCase = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCAmelCase = time.time()
__lowerCAmelCase , __lowerCAmelCase = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicated dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCAmelCase = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
__lowerCAmelCase = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
__lowerCAmelCase = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCAmelCase = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCAmelCase = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 271 | 1 |
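# A stripped-down sketch of the exact-deduplication pattern from the script
# above: hash the whitespace-normalized content, collect the set of unique
# hashes, and keep only the first occurrence of each hash by removing it from
# the set once it has been seen. Inputs are illustrative.
import hashlib
import re

PATTERN = re.compile(r"\s+")

def content_hash(text):
    return hashlib.md5(PATTERN.sub("", text).encode("utf-8")).hexdigest()

docs = ["a b", "a  b", "c"]
uniques = {content_hash(d) for d in docs}
kept = []
for doc in docs:
    h = content_hash(doc)
    if h in uniques:       # first occurrence of this hash
        uniques.remove(h)
        kept.append(doc)
print(kept)                # ['a b', 'c'] -- the whitespace variant is dropped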
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[int] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 2_0, 'a ' * 3_0, 'b ' * 7],
}
_a : Tuple = Dataset.from_dict(__a )
return dataset
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[str] = get_dataset()
_a : str = make_duplicate_clusters(_a ,0.85 )
self.assertEqual(len(duplicate_clusters[0] ) ,2 )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Union[str, Any] = get_dataset()
_a, _a : Union[str, Any] = deduplicate_dataset(_a )
self.assertEqual(len(_a ) ,2 )
print(_a )
self.assertEqual(duplicate_clusters[0][0]['copies'] ,2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] ,_a )
| 271 |
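# The near-duplicate criterion exercised by the test above is Jaccard
# similarity over token sets (approximated with MinHash in the real module); a
# direct computation on the test documents shows why the first two repos
# cluster together at a 0.85 threshold while the third does not:
def jaccard(a, b):
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

print(jaccard("a " * 20, "a " * 30))  # 1.0  -> flagged as duplicates
print(jaccard("a " * 20, "b " * 7))   # 0.0  -> unrelated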
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUs (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase = 1_6
__lowerCAmelCase = 3_2
def UpperCAmelCase_ (__a : Accelerator , __a : DatasetDict , __a : List[int] , __a : List[int] , __a : int = 1_6 ):
"""simple docstring"""
_a : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
_a : str = DatasetDict(
{
'train': dataset['train'].select(__a ),
'validation': dataset['train'].select(__a ),
'test': dataset['validation'],
} )
def tokenize_function(__a : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
_a : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a : List[str] = datasets.map(
__a , batched=__a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__a : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a : Tuple = 1_6
elif accelerator.mixed_precision != "no":
_a : List[Any] = 8
else:
_a : List[Any] = None
return tokenizer.pad(
__a , padding='longest' , max_length=__a , pad_to_multiple_of=__a , return_tensors='pt' , )
# Instantiate dataloaders.
_a : Any = DataLoader(
tokenized_datasets['train'] , shuffle=__a , collate_fn=__a , batch_size=__a )
_a : Optional[int] = DataLoader(
tokenized_datasets['validation'] , shuffle=__a , collate_fn=__a , batch_size=__a )
_a : Optional[Any] = DataLoader(
tokenized_datasets['test'] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader, test_dataloader
def UpperCAmelCase_ (__a : Any , __a : Union[str, Any] ):
"""simple docstring"""
_a : Dict = []
# Download the dataset
_a : Tuple = load_dataset('glue' , 'mrpc' )
# Create our splits
_a : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_a : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Optional[Any] = config['lr']
_a : Optional[int] = int(config['num_epochs'] )
_a : Dict = int(config['seed'] )
_a : Dict = int(config['batch_size'] )
_a : Optional[int] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_a : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Any = batch_size // MAX_GPU_BATCH_SIZE
_a : List[str] = MAX_GPU_BATCH_SIZE
set_seed(__a )
# New Code #
# Create our folds:
_a : int = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
_a : Any = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__a ):
_a, _a, _a : Optional[Any] = get_fold_dataloaders(
__a , __a , __a , __a , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : Dict = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_a : List[str] = AdamW(params=model.parameters() , lr=__a )
# Instantiate scheduler
_a : List[Any] = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=1_0_0 , num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a, _a, _a, _a, _a : Union[str, Any] = accelerator.prepare(
__a , __a , __a , __a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : Dict = model(**__a )
_a : int = outputs.loss
_a : Any = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Union[str, Any] = model(**__a )
_a : Tuple = outputs.logits.argmax(dim=-1 )
_a, _a : Any = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__a , references=__a , )
_a : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __a )
# New Code #
# We also run predictions on the test set at the very end
_a : Any = []
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Tuple = model(**__a )
_a : Dict = outputs.logits
_a, _a : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__a , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_a : Dict = torch.cat(__a , dim=0 )
_a : Any = torch.stack(__a , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_a : str = metric.compute(predictions=__a , references=__a )
accelerator.print('Average test metrics from all folds:' , __a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=__a , default=__a , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=__a , default=3 , help='The number of splits to perform across the dataset' )
_a : Any = parser.parse_args()
_a : int = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(__a , __a )
if __name__ == "__main__":
main()
| 271 | 1 |
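# A small sketch of the fold-ensembling step at the end of the script above:
# per-fold test logits are stacked, summed, divided by the number of folds,
# and argmax'd into class predictions. Shapes are illustrative.
import torch

num_folds, num_examples, num_classes = 3, 4, 2
fold_logits = [torch.randn(num_examples, num_classes) for _ in range(num_folds)]
# (num_folds, num_examples, num_classes) -> mean over folds -> class ids
preds = torch.stack(fold_logits, dim=0).sum(dim=0).div(num_folds).argmax(dim=-1)
print(preds.shape)  # torch.Size([4])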
'''simple docstring'''
import unittest
import numpy as np
def UpperCAmelCase_ (__a : np.ndarray , __a : np.ndarray , __a : np.ndarray , __a : np.ndarray | None = None , ):
"""simple docstring"""
_a : Optional[int] = np.shape(__a )
_a : Dict = np.shape(__a )
_a : List[Any] = np.shape(__a )
if shape_a[0] != shape_b[0]:
_a : int = (
'Expected the same number of rows for A and B. '
f"""Instead found A of size {shape_a} and B of size {shape_b}"""
)
raise ValueError(__a )
if shape_b[1] != shape_c[1]:
_a : Tuple = (
'Expected the same number of columns for B and C. '
f"""Instead found B of size {shape_b} and C of size {shape_c}"""
)
raise ValueError(__a )
_a : Optional[Any] = pseudo_inv
if a_inv is None:
try:
_a : Union[str, Any] = np.linalg.inv(__a )
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.' )
return mat_c - mat_b.T @ a_inv @ mat_b
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Dict = np.array([[0, 3], [3, 0], [2, 3]] )
_a : List[str] = np.array([[2, 1], [6, 3]] )
_a : Any = schur_complement(_a ,_a ,_a )
_a : Any = np.block([[a, b], [b.T, c]] )
_a : str = np.linalg.det(_a )
_a : Dict = np.linalg.det(_a )
_a : Union[str, Any] = np.linalg.det(_a )
self.assertAlmostEqual(_a ,det_a * det_s )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
_a : str = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_a ):
schur_complement(_a ,_a ,_a )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
_a : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
_a : Optional[Any] = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_a ):
schur_complement(_a ,_a ,_a )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 271 |
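# A quick numerical check of the identity the determinant test above relies
# on: for a block matrix M = [[A, B], [B^T, C]] with A invertible,
# det(M) = det(A) * det(S), where S = C - B^T A^{-1} B is the Schur complement.
import numpy as np

a = np.array([[1.0, 2.0, 1.0], [2.0, 1.0, 2.0], [3.0, 2.0, 4.0]])
b = np.array([[0.0, 3.0], [3.0, 0.0], [2.0, 3.0]])
c = np.array([[2.0, 1.0], [6.0, 3.0]])

s = c - b.T @ np.linalg.inv(a) @ b            # Schur complement of A
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))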
'''simple docstring'''
from __future__ import annotations
__lowerCAmelCase = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
__lowerCAmelCase = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Optional[int] = []
_a : int = len(__a )
for i in range(__a ):
_a : float = -1
for j in range(i + 1 , __a ):
if arr[i] < arr[j]:
_a : Any = arr[j]
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Tuple = []
for i, outer in enumerate(__a ):
_a : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_a : Dict = inner
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : int = len(__a )
_a : list[float] = []
_a : list[float] = [-1] * arr_size
for index in reversed(range(__a ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_a : Dict = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCAmelCase = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
| 271 | 1 |
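# A compact restatement of the O(n) monotonic-stack idea behind
# next_greatest_element above: scanning right to left, pop every stacked value
# that is <= the current one; whatever survives on top is the next greater
# element, and the current value is then pushed for the indices to its left.
def next_greater(arr):
    result = [-1] * len(arr)
    stack = []  # candidates to the right of i, smallest on top
    for i in reversed(range(len(arr))):
        while stack and stack[-1] <= arr[i]:
            stack.pop()
        if stack:
            result[i] = stack[-1]
        stack.append(arr[i])
    return result

print(next_greater([2, 7, 3, 5, 4]))  # [7, -1, 5, -1, -1]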
'''simple docstring'''
import os
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : Dict = len(grid[0] )
_a : Dict = len(__a )
_a : Dict = 0
_a : Optional[int] = 0
_a : Any = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(__a ):
for j in range(n_rows - 3 ):
_a : int = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
_a : Any = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
_a : Union[str, Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
_a : Tuple = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
_a : Union[str, Any] = max(
__a , __a , __a , __a )
if max_product > largest:
_a : Any = max_product
return largest
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = []
with open(os.path.dirname(__a ) + '/grid.txt' ) as file:
for line in file:
grid.append(line.strip('\n' ).split(' ' ) )
_a : Any = [[int(__a ) for i in grid[j]] for j in range(len(__a ) )]
return largest_product(__a )
if __name__ == "__main__":
print(solution())
| 271 |
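# An alternative, direction-vector formulation of the same search in the
# solution above: for each cell, multiply `run` consecutive values along each
# of the four directions (right, down, down-right, down-left) that stay inside
# the n x n grid, and keep the maximum.
def largest_run_product(grid, run=4):
    n = len(grid)
    best = 0
    for r in range(n):
        for c in range(n):
            for dr, dc in ((0, 1), (1, 0), (1, 1), (1, -1)):
                end_r, end_c = r + (run - 1) * dr, c + (run - 1) * dc
                if 0 <= end_r < n and 0 <= end_c < n:
                    p = 1
                    for k in range(run):
                        p *= grid[r + k * dr][c + k * dc]
                    best = max(best, p)
    return best

print(largest_run_product([[1, 2, 3, 4]] * 4))  # 256 (the column of 4s)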
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__lowerCAmelCase = HUGGINGFACE_HUB_CACHE
__lowerCAmelCase = """config.json"""
__lowerCAmelCase = """diffusion_pytorch_model.bin"""
__lowerCAmelCase = """diffusion_flax_model.msgpack"""
__lowerCAmelCase = """model.onnx"""
__lowerCAmelCase = """diffusion_pytorch_model.safetensors"""
__lowerCAmelCase = """weights.pb"""
__lowerCAmelCase = """https://huggingface.co"""
__lowerCAmelCase = default_cache_path
__lowerCAmelCase = """diffusers_modules"""
__lowerCAmelCase = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
__lowerCAmelCase = ["""fp16""", """non-ema"""]
__lowerCAmelCase = """.self_attn"""
| 271 | 1 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__lowerCAmelCase = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def UpperCAmelCase_ (__a : Optional[int] , __a : Union[str, Any] ):
"""simple docstring"""
if args.student_type == "roberta":
_a : Optional[Any] = False
elif args.student_type == "gpt2":
_a : int = False
def UpperCAmelCase_ (__a : Any , __a : Dict ):
"""simple docstring"""
if args.student_type == "roberta":
_a : Dict = False
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[int] = argparse.ArgumentParser(description='Training' )
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' )
parser.add_argument(
'--dump_path' , type=__a , required=__a , help='The output directory (log, checkpoints, parameters, etc.)' )
parser.add_argument(
'--data_file' , type=__a , required=__a , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=__a , choices=['distilbert', 'roberta', 'gpt2'] , required=__a , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=__a , required=__a , help='Path to the student configuration.' )
parser.add_argument(
'--student_pretrained_weights' , default=__a , type=__a , help='Load student initialization checkpoint.' )
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=__a , help='Teacher type (BERT, RoBERTa).' )
parser.add_argument('--teacher_name' , type=__a , required=__a , help='The teacher model.' )
parser.add_argument('--temperature' , default=2.0 , type=__a , help='Temperature for the softmax temperature.' )
parser.add_argument(
'--alpha_ce' , default=0.5 , type=__a , help='Linear weight for the distillation loss. Must be >=0.' )
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=__a , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=__a , help='Linear weight for the CLM loss. Must be >=0.' )
parser.add_argument('--alpha_mse' , default=0.0 , type=__a , help='Linear weight of the MSE loss. Must be >=0.' )
parser.add_argument(
'--alpha_cos' , default=0.0 , type=__a , help='Linear weight of the cosine embedding loss. Must be >=0.' )
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' )
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=__a , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=__a , help='Proportion of tokens to mask out.' )
parser.add_argument('--word_keep' , default=0.1 , type=__a , help='Proportion of tokens to keep.' )
parser.add_argument('--word_rand' , default=0.1 , type=__a , help='Proportion of tokens to randomly replace.' )
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=__a , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=__a , help='The token counts in the data_file for MLM.' )
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only on the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=__a , default=3 , help='Number of pass on the whole dataset.' )
parser.add_argument('--batch_size' , type=__a , default=5 , help='Batch size (for each process).' )
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=__a , default=5_0 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=__a , help='Linear warmup proportion.' )
parser.add_argument('--weight_decay' , default=0.0 , type=__a , help='Weight decay if we apply some.' )
parser.add_argument('--learning_rate' , default=5e-4 , type=__a , help='The initial learning rate for Adam.' )
parser.add_argument('--adam_epsilon' , default=1e-6 , type=__a , help='Epsilon for Adam optimizer.' )
parser.add_argument('--max_grad_norm' , default=5.0 , type=__a , help='Max gradient norm.' )
parser.add_argument('--initializer_range' , default=0.02 , type=__a , help='Random initialization range.' )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=__a , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=__a , default=1 , help='Number of GPUs in the node.' )
parser.add_argument('--local_rank' , type=__a , default=-1 , help='Distributed training - Local rank' )
parser.add_argument('--seed' , type=__a , default=5_6 , help='Random seed' )
parser.add_argument('--log_interval' , type=__a , default=5_0_0 , help='Tensorboard logging interval.' )
parser.add_argument('--checkpoint_interval' , type=__a , default=4_0_0_0 , help='Checkpoint interval.' )
_a : Any = parser.parse_args()
sanity_checks(__a )
# ARGS #
init_gpu_params(__a )
set_seed(__a )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"""Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"""
' itUse `--force` if you want to overwrite it' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""" )
# SAVE PARAMS #
logger.info(f"""Param: {args}""" )
with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
json.dump(vars(__a ) , __a , indent=4 )
git_log(args.dump_path )
_a, _a, _a : Union[str, Any] = MODEL_CLASSES[args.student_type]
_a, _a, _a : Tuple = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
_a : List[str] = teacher_tokenizer_class.from_pretrained(args.teacher_name )
_a : Optional[int] = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
_a : Any = tokenizer.all_special_tokens.index(__a )
_a : int = tokenizer.all_special_ids[idx]
logger.info(f"""Special tokens {special_tok_ids}""" )
_a : List[str] = special_tok_ids
_a : Any = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"""Loading data from {args.data_file}""" )
with open(args.data_file , 'rb' ) as fp:
_a : Optional[int] = pickle.load(__a )
if args.mlm:
logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""" )
with open(args.token_counts , 'rb' ) as fp:
_a : Tuple = pickle.load(__a )
_a : Union[str, Any] = np.maximum(__a , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
_a : str = 0.0 # do not predict special tokens
_a : Optional[int] = torch.from_numpy(__a )
else:
_a : Optional[Any] = None
_a : Union[str, Any] = LmSeqsDataset(params=__a , data=__a )
logger.info('Data loader created.' )
# STUDENT #
logger.info(f"""Loading student config from {args.student_config}""" )
_a : Dict = student_config_class.from_pretrained(args.student_config )
_a : int = True
if args.student_pretrained_weights is not None:
logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""" )
_a : List[str] = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a )
else:
_a : List[str] = student_model_class(__a )
if args.n_gpu > 0:
student.to(f"""cuda:{args.local_rank}""" )
logger.info('Student loaded.' )
# TEACHER #
_a : int = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a )
if args.n_gpu > 0:
teacher.to(f"""cuda:{args.local_rank}""" )
logger.info(f"""Teacher loaded from {args.teacher_name}.""" )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__a , __a )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__a , __a )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
_a : Dict = Distiller(
params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
| 271 |
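# A minimal sketch of the temperature-scaled distillation term (the alpha_ce
# loss) that the Distiller above optimizes: KL divergence between softened
# student and teacher distributions, rescaled by T^2. Tensors are illustrative.
import torch
import torch.nn.functional as F

temperature = 2.0
student_logits = torch.randn(8, 100)  # (batch, vocab)
teacher_logits = torch.randn(8, 100)

loss_ce = F.kl_div(
    F.log_softmax(student_logits / temperature, dim=-1),
    F.softmax(teacher_logits / temperature, dim=-1),
    reduction="batchmean",
) * temperature**2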
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : Any ,_a : Optional[int]=2 ,_a : Optional[Any]=True ,_a : Dict=False ,_a : Dict=10 ,_a : Any=3 ,_a : str=32 * 8 ,_a : Optional[int]=32 * 8 ,_a : int=4 ,_a : str=64 ,):
'''simple docstring'''
_a : Dict = parent
_a : Union[str, Any] = batch_size
_a : Tuple = is_training
_a : List[str] = use_auxiliary_loss
_a : Optional[Any] = num_queries
_a : str = num_channels
_a : List[str] = min_size
_a : int = max_size
_a : Optional[int] = num_labels
_a : List[str] = hidden_dim
_a : int = hidden_dim
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
_a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a )
_a : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5
).float()
_a : Tuple = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long()
_a : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
_a : str = self.num_queries
_a : Union[str, Any] = self.num_labels
_a : Tuple = [1, 1, 1, 1]
_a : Dict = self.num_channels
_a : str = 64
_a : Tuple = 128
_a : Optional[Any] = self.hidden_dim
_a : Union[str, Any] = self.hidden_dim
_a : List[Any] = self.hidden_dim
return config
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a, _a, _a, _a, _a : Optional[Any] = self.prepare_config_and_inputs()
_a : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : str ):
'''simple docstring'''
_a : str = output.encoder_hidden_states
_a : Any = output.pixel_decoder_hidden_states
_a : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,config.decoder_layers )
def __lowercase ( self : List[str] ,_a : str ,_a : List[Any] ,_a : Any ,_a : Union[str, Any]=False ):
'''simple docstring'''
with torch.no_grad():
_a : str = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[Any] = model(_a ,output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a ,_a )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Union[str, Any] ,_a : Tuple ,_a : List[str] ,_a : Any ):
'''simple docstring'''
_a : int = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[int] = model(_a )
comm_check_on_output(_a )
_a : List[str] = model(
pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Union[str, Any] = MaskaFormerModelTester(self )
_a : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(_a )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] = [*signature.parameters.keys()]
_a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_a : Dict = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : int = (self.model_tester.min_size,) * 2
_a : Any = {
'pixel_values': torch.randn((2, 3, *size) ,device=_a ),
'mask_labels': torch.randn((2, 10, *size) ,device=_a ),
'class_labels': torch.zeros(2 ,10 ,device=_a ).long(),
}
_a : List[Any] = self.model_tester.get_config()
_a : int = MaskaFormerForUniversalSegmentation(_a ).to(_a )
_a : str = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a ).to(_a )
_a : Optional[int] = model(**_a ,output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_a : List[str] = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[str] = self.model_tester.prepare_config_and_inputs()
_a : Any = model_class(_a )
model.to(_a )
model.train()
_a : Union[str, Any] = model(_a ,mask_labels=_a ,class_labels=_a ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : str = True
_a : str = True
_a : List[str] = model_class(_a ).to(_a )
model.train()
_a : Optional[int] = model(_a ,mask_labels=_a ,class_labels=_a )
_a : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_a : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1e-4
def UpperCAmelCase_ ():
"""simple docstring"""
_a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowercase ( self : Any ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
_a : int = self.default_image_processor
_a : Tuple = prepare_img()
_a : Any = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[Any] = model(**_a )
_a : List[Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : str = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Any = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Optional[Any] = self.default_image_processor
_a : List[Any] = prepare_img()
_a : str = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Any = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[int] = model(**_a )
# masks_queries_logits
_a : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_a : Dict = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_a : Optional[Any] = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
_a : str = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Tuple = self.default_image_processor
_a : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
_a : str = inputs['pixel_values'].to(_a )
_a : str = [el.to(_a ) for el in inputs['mask_labels']]
_a : Dict = [el.to(_a ) for el in inputs['class_labels']]
with torch.no_grad():
_a : List[str] = model(**_a )
self.assertTrue(outputs.loss is not None )
| 271 | 1 |
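# The retain-grad pattern from the last test above, in isolation: intermediate
# (non-leaf) tensors do not keep their gradients after backward() unless
# retain_grad() is called on them first.
import torch

x = torch.randn(3, requires_grad=True)
h = x * 2            # intermediate, non-leaf tensor
h.retain_grad()      # without this, h.grad would be None after backward()
h.sum().backward()
print(h.grad)        # tensor([1., 1., 1.])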
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("""At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training""")
# TF training parameters
__lowerCAmelCase = False
__lowerCAmelCase = False
def UpperCAmelCase_ (__a : Namespace ):
"""simple docstring"""
return TrainCommand(__a )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
@staticmethod
def __lowercase ( _a : ArgumentParser ):
'''simple docstring'''
_a : Optional[Any] = parser.add_parser('train' ,help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' ,type=_a ,required=_a ,help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' ,)
train_parser.add_argument(
'--column_label' ,type=_a ,default=0 ,help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' ,type=_a ,default=1 ,help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' ,type=_a ,default=2 ,help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' ,action='store_true' ,help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' ,type=_a ,default='' ,help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' ,type=_a ,default=0.1 ,help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' ,)
train_parser.add_argument('--output' ,type=_a ,default='./' ,help='path to saved the trained model.' )
train_parser.add_argument(
'--task' ,type=_a ,default='text_classification' ,help='Task to train the model on.' )
train_parser.add_argument(
'--model' ,type=_a ,default='bert-base-uncased' ,help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' ,type=_a ,default=32 ,help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' ,type=_a ,default=64 ,help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' ,type=_a ,default=3E-5 ,help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' ,type=_a ,default=1E-08 ,help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=_a )
def __init__( self : int ,_a : Namespace ):
'''simple docstring'''
_a : Union[str, Any] = logging.get_logger('transformers-cli/training' )
_a : int = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output ,exist_ok=_a )
_a : List[str] = args.output
_a : Optional[Any] = args.column_label
_a : Any = args.column_text
_a : Union[str, Any] = args.column_id
self.logger.info(F"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
_a : Dict = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"""Loading dataset from {args.train_data}""" )
_a : Any = Processor.create_from_csv(
args.train_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_a : Tuple = None
if args.validation_data:
self.logger.info(F"""Loading validation dataset from {args.validation_data}""" )
_a : Optional[int] = Processor.create_from_csv(
args.validation_data ,column_label=args.column_label ,column_text=args.column_text ,column_id=args.column_id ,skip_first_row=args.skip_first_row ,)
_a : Any = args.validation_split
_a : Optional[Any] = args.train_batch_size
_a : Dict = args.valid_batch_size
_a : Optional[int] = args.learning_rate
_a : int = args.adam_epsilon
def __lowercase ( self : Any ):
'''simple docstring'''
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __lowercase ( self : Tuple ):
'''simple docstring'''
raise NotImplementedError
def __lowercase ( self : List[str] ):
'''simple docstring'''
self.pipeline.fit(
self.train_dataset ,validation_data=self.valid_dataset ,validation_split=self.validation_split ,learning_rate=self.learning_rate ,adam_epsilon=self.adam_epsilon ,train_batch_size=self.train_batch_size ,valid_batch_size=self.valid_batch_size ,)
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 271 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_ (__a : List[Any] ):
"""simple docstring"""
    if (
        (cp >= 0x4E_00 and cp <= 0x9F_FF)  # CJK Unified Ideographs
        or (cp >= 0x34_00 and cp <= 0x4D_BF)  # CJK Unified Ideographs Extension A
        or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF)  # CJK Unified Ideographs Extension B
        or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F)  # CJK Unified Ideographs Extension C
        or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F)  # CJK Unified Ideographs Extension D
        or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF)  # CJK Unified Ideographs Extension E
        or (cp >= 0xF9_00 and cp <= 0xFA_FF)  # CJK Compatibility Ideographs
        or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F)  # CJK Compatibility Ideographs Supplement
    ):
return True
return False
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
for char in word:
_a : Union[str, Any] = ord(__a )
if not _is_chinese_char(__a ):
return 0
return 1
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : Dict = set()
for token in tokens:
_a : str = len(__a ) > 1 and is_chinese(__a )
if chinese_word:
word_set.add(__a )
_a : Optional[Any] = list(__a )
return word_list
def UpperCAmelCase_ (__a : List[str] , __a : set ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a : Optional[Any] = max([len(__a ) for w in chinese_word_set] )
_a : Optional[int] = bert_tokens
_a, _a : Any = 0, len(__a )
while start < end:
_a : Tuple = True
if is_chinese(bert_word[start] ):
_a : Union[str, Any] = min(end - start , __a )
for i in range(__a , 1 , -1 ):
_a : Optional[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_a : Any = '##' + bert_word[j]
_a : Union[str, Any] = start + i
_a : int = False
break
if single_word:
start += 1
return bert_word
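# Worked example for the marker above (traced by hand): with
# bert_tokens = ["研", "究", "员"] and chinese_word_set = {"研究员"}, the loop
# matches the whole word at position 0 and returns ["研", "##究", "##员"], so
# continuation pieces of a matched Chinese word carry the "##" prefix.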
def UpperCAmelCase_ (__a : List[str] , __a : LTP , __a : BertTokenizer ):
"""simple docstring"""
_a : int = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : Union[str, Any] = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
_a : Optional[Any] = [get_chinese_word(__a ) for r in res]
ltp_res.extend(__a )
assert len(__a ) == len(__a )
_a : str = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : List[str] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=__a , truncation=__a , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(__a ) == len(__a )
_a : List[str] = []
for input_ids, chinese_word in zip(__a , __a ):
_a : int = []
for id in input_ids:
_a : Optional[int] = bert_tokenizer._convert_id_to_token(__a )
input_tokens.append(__a )
_a : List[str] = add_sub_symbol(__a , __a )
_a : Tuple = []
        # We only save the positions of Chinese subwords that start with "##", i.e. pieces of a whole word.
for i, token in enumerate(__a ):
if token[:2] == "##":
_a : str = token[2:]
# save chinese tokens' pos
if len(__a ) == 1 and _is_chinese_char(ord(__a ) ):
ref_id.append(__a )
ref_ids.append(__a )
assert len(__a ) == len(__a )
return ref_ids
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_a : Dict = f.readlines()
_a : int = [line.strip() for line in data if len(__a ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    _a : int = LTP(args.ltp )  # faster on a GPU device
_a : Tuple = BertTokenizer.from_pretrained(args.bert )
_a : int = prepare_ref(__a , __a , __a )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_a : Optional[Any] = [json.dumps(__a ) + '\n' for ref in ref_ids]
f.writelines(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
__lowerCAmelCase = parser.parse_args()
main(args)
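    # Invocation sketch (defaults taken from the parser above; the script
    # filename is an assumption based on the argparse description):
    #
    #   python prepare_chinese_ref.py \
    #       --file_name ./resources/chinese-demo.txt \
    #       --ltp ./resources/ltp \
    #       --bert ./resources/robert \
    #       --save_path ./resources/ref.txt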
| 271 | 1 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) != 3_2:
raise ValueError('Input must be of length 32' )
_a : List[Any] = b''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : List[str] = format(__a , '08x' )[-8:]
_a : Dict = b''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
return little_endian_hex
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : List[Any] = b''
for char in message:
bit_string += format(__a , '08b' ).encode('utf-8' )
_a : List[str] = format(len(__a ) , '064b' ).encode('utf-8' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__a ) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] )
return bit_string
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
if len(__a ) % 5_1_2 != 0:
raise ValueError('Input must have length that\'s a multiple of 512' )
for pos in range(0 , len(__a ) , 5_1_2 ):
_a : Optional[Any] = bit_string[pos : pos + 5_1_2]
_a : Optional[Any] = []
for i in range(0 , 5_1_2 , 3_2 ):
block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) )
yield block_words
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
_a : Union[str, Any] = format(__a , '032b' )
_a : Dict = ''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__a , 2 )
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
return (a + b) % 2**3_2
def UpperCAmelCase_ (__a : int , __a : int ):
"""simple docstring"""
if i < 0:
raise ValueError('Input must be non-negative' )
if shift < 0:
raise ValueError('Shift must be non-negative' )
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def UpperCAmelCase_ (__a : bytes ):
"""simple docstring"""
_a : Optional[Any] = preprocess(__a )
_a : List[str] = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )]
# Starting states
_a : str = 0x67_45_23_01
_a : Dict = 0xEF_CD_AB_89
_a : Union[str, Any] = 0x98_BA_DC_FE
_a : List[str] = 0x10_32_54_76
_a : str = [
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
7,
1_2,
1_7,
2_2,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
5,
9,
1_4,
2_0,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
4,
1_1,
1_6,
2_3,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
6,
1_0,
1_5,
2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__a ):
_a : Union[str, Any] = aa
_a : List[str] = ba
_a : List[Any] = ca
_a : Tuple = da
# Hash current chunk
for i in range(6_4 ):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
_a : Dict = d ^ (b & (c ^ d))
_a : Any = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
_a : Dict = c ^ (d & (b ^ c))
_a : Union[str, Any] = (5 * i + 1) % 1_6
elif i <= 4_7:
_a : List[str] = b ^ c ^ d
_a : str = (3 * i + 5) % 1_6
else:
_a : Dict = c ^ (b | not_aa(__a ))
_a : Dict = (7 * i) % 1_6
_a : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**3_2
_a : Union[str, Any] = d
_a : str = c
_a : List[Any] = b
_a : Any = sum_aa(__a , left_rotate_aa(__a , shift_amounts[i] ) )
# Add hashed chunk to running total
_a : Optional[int] = sum_aa(__a , __a )
_a : Any = sum_aa(__a , __a )
_a : Tuple = sum_aa(__a , __a )
_a : Dict = sum_aa(__a , __a )
_a : str = reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a ) + reformat_hex(__a )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
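# Sanity-check sketch for an MD5 implementation like the one above. The name
# `md5_me` below is an assumption (the digest function is obfuscated in this
# sample); `hashlib` is the standard-library reference implementation.
import hashlib

def check_md5(md5_impl, message: bytes) -> bool:
    # the implementation above returns its digest as ASCII hex bytes
    return md5_impl(message).decode() == hashlib.md5(message).hexdigest()

# check_md5(md5_me, b"hello")  # expected: True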
| 271 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple ,*_a : List[str] ,**_a : Any ):
'''simple docstring'''
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
| 271 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if not isinstance(__a , __a ):
raise TypeError('only integers accepted as input' )
else:
_a : int = str(abs(__a ) )
_a : Any = [list(__a ) for char in range(len(__a ) )]
for index in range(len(__a ) ):
num_transpositions[index].pop(__a )
return max(
int(''.join(list(__a ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 271 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return choice(__a )
def UpperCAmelCase_ (__a : list[int] , __a : int ):
"""simple docstring"""
_a : Dict = random_pivot(__a )
# partition based on pivot
# linear time
_a : Optional[int] = [e for e in lst if e < pivot]
_a : List[str] = [e for e in lst if e > pivot]
    # if we get lucky, the pivot might be the element we want.
    # this is easy to see from the partition:
    # small (elements smaller than the pivot)
    # + pivot (the k-th element when len(small) == k - 1)
    # + big  (elements larger than the pivot)
if len(__a ) == k - 1:
return pivot
    # the k-th smallest element is among the elements bigger than the pivot
elif len(__a ) < k - 1:
return kth_number(__a , k - len(__a ) - 1 )
    # the k-th smallest element is among the elements smaller than the pivot
else:
return kth_number(__a , __a )
if __name__ == "__main__":
import doctest
doctest.testmod()
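# Notes on the selector above: `k` is 1-indexed (k == 1 returns the minimum),
# and because both partitions use strict comparisons, every element equal to
# the pivot is dropped, so the routine assumes distinct input values.
# Illustrative call: kth_number([3, 1, 4, 5, 9, 2, 6], 3) == 3.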
| 271 | 1 |
'''simple docstring'''
from itertools import count
def UpperCAmelCase_ (__a : int = 5_0 ):
"""simple docstring"""
_a : int = [1] * min_block_length
for n in count(__a ):
fill_count_functions.append(1 )
for block_length in range(__a , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(f'''{solution() = }''')
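# Note: `solution()` counts the arrangements that fill a row of length n with
# blocks of length >= 50 separated by at least one empty cell (the `- 1` in the
# recurrence) and returns the least n for which the count first exceeds one
# million; this matches the fill-count function F(50, n) of Project Euler
# problem 115 (a hedged reading of this obfuscated sample).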
| 271 |
'''simple docstring'''
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict ):
'''simple docstring'''
_a : Dict = {}
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(_a ,' -> ' ,' -> '.join([str(_a ) for j in self.vertex[i]] ) )
def __lowercase ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_a )
else:
# else make a new vertex
_a : int = [to_vertex]
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_a ,_a )
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : list ):
'''simple docstring'''
_a : List[Any] = True
print(_a ,end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_a ,_a )
if __name__ == "__main__":
__lowerCAmelCase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
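# An equivalent iterative depth-first traversal, sketched outside the class
# above (names are illustrative); it reproduces the printed order for the
# example graph when started from vertex 0.
def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.add(node)
            order.append(node)
            # push neighbours reversed so they pop in insertion order
            stack.extend(reversed(graph.get(node, [])))
    return order

# dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]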
| 271 | 1 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowerCAmelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def UpperCAmelCase_ (__a : str , __a : str ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
_a : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(__a )
_a, _a : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
__a , output_loading_info=__a )
else:
_a : Optional[int] = ProphetNetForConditionalGenerationOld.from_pretrained(__a )
_a, _a : int = ProphetNetForConditionalGeneration.from_pretrained(
__a , output_loading_info=__a )
_a : Optional[int] = ['key_proj', 'value_proj', 'query_proj']
_a : Optional[int] = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_a : Union[str, Any] = key.split('.' )
if attributes[0] == "lm_head":
_a : Union[str, Any] = prophet
_a : str = prophet_old
else:
_a : Dict = prophet.prophetnet
_a : Union[str, Any] = prophet_old.model
_a : List[str] = False
for attribute in attributes:
if attribute in mapping:
_a : Optional[int] = mapping[attribute]
if not hasattr(__a , __a ) and len(__a ) > 0:
_a : str = attribute
elif hasattr(__a , __a ):
_a : List[str] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_a : Dict = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
_a : List[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_a : List[Any] = old_model.bias
logger.info(f"""{attribute} is initialized""" )
_a : int = True
break
elif attribute in special_keys and hasattr(__a , 'in_proj_weight' ):
_a : Any = old_model.in_proj_weight.shape[0] // 3
_a : Union[str, Any] = getattr(__a , __a )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_a : Optional[Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_a : Optional[int] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_a : Tuple = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_a : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_a : Optional[int] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_a : Optional[int] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_a : Any = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
_a : List[str] = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
_a : List[Any] = True
break
if attribute.isdigit():
_a : str = model[int(__a )]
_a : List[str] = old_model[int(__a )]
else:
_a : Optional[int] = getattr(__a , __a )
if old_attribute == "":
_a : Optional[int] = old_model
else:
if not hasattr(__a , __a ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
_a : Union[str, Any] = getattr(__a , __a )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__lowerCAmelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
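    # Invocation sketch (the script filename is an assumption; both paths are
    # required by the parser above):
    #
    #   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
    #       --prophetnet_checkpoint_path /path/to/old_checkpoint \
    #       --pytorch_dump_folder_path /path/to/output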
| 271 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = """▁"""
__lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
__lowerCAmelCase = {"""vinai/bartpho-syllable""": 1_0_2_4}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self : str ,_a : str ,_a : Any ,_a : Any="<s>" ,_a : Dict="</s>" ,_a : int="</s>" ,_a : Union[str, Any]="<s>" ,_a : List[Any]="<unk>" ,_a : Optional[Any]="<pad>" ,_a : List[str]="<mask>" ,_a : Optional[Dict[str, Any]] = None ,**_a : int ,):
'''simple docstring'''
_a : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
_a : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
_a : Optional[int] = vocab_file
_a : Union[str, Any] = monolingual_vocab_file
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : Union[str, Any] = {}
_a : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_a ) not in self.fairseq_tokens_to_ids:
_a : int = cnt
cnt += 1
with open(_a ,'r' ,encoding='utf-8' ) as f:
for line in f.readlines():
_a : str = line.strip().split()[0]
_a : Tuple = len(self.fairseq_tokens_to_ids )
if str(_a ) not in self.fairseq_tokens_to_ids:
_a : List[str] = len(self.fairseq_tokens_to_ids )
_a : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
_a : int = self.__dict__.copy()
_a : str = None
_a : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple ,_a : Tuple ):
'''simple docstring'''
_a : Tuple = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : List[str] = {}
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowercase ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Dict = [self.cls_token_id]
_a : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __lowercase ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : Tuple ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def __lowercase ( self : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __lowercase ( self : Any ,_a : int ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __lowercase ( self : Tuple ,_a : Union[str, Any] ):
'''simple docstring'''
_a : str = ''.join(_a ).replace(_a ,' ' ).strip()
return out_string
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : int = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_a : int = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,'wb' ) as fi:
_a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_a ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,_a )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_a ,'w' ,encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(_a )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
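# Minimal usage sketch; `BartphoTokenizer` is an assumed name for the
# obfuscated class above, while the model id comes from the pretrained map
# in this file (BARTpho is a Vietnamese model, hence the sample sentence):
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]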
| 271 | 1 |
'''simple docstring'''
from statistics import mean, stdev
def UpperCAmelCase_ (__a : list , __a : int = 3 ):
"""simple docstring"""
_a : Optional[Any] = min(__a )
_a : int = max(__a )
# normalize data
return [round((x - x_min) / (x_max - x_min) , __a ) for x in data]
def UpperCAmelCase_ (__a : list , __a : int = 3 ):
"""simple docstring"""
_a : Dict = mean(__a )
_a : Union[str, Any] = stdev(__a )
# standardize data
return [round((x - mu) / (sigma) , __a ) for x in data]
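# Worked example for the two rescalings above:
#   normalization([2, 4, 6])   -> [0.0, 0.5, 1.0]   # min-max scaling to [0, 1]
#   standardization([2, 4, 6]) -> [-1.0, 0.0, 1.0]  # zero mean, unit sample stdev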
| 271 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : List[Any] = None
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a ,'feature_size' ) )
self.assertTrue(hasattr(_a ,'sampling_rate' ) )
self.assertTrue(hasattr(_a ,'padding_value' ) )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_tester.prepare_inputs_for_common()
_a : str = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a ,processed_features[input_name] ) ) )
_a : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Union[str, Any] = BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
_a : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : str = BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
_a : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = feat_extract.model_input_names[0]
_a : int = BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
_a : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __lowercase ( self : Dict ,_a : Any=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : Tuple ):
_a : Tuple = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : Optional[Any] ,_a : Union[str, Any] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : int = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Tuple = BatchFeature({input_name: speech_inputs} )
_a : str = self.feat_extract_tester.seq_length_diff
_a : Dict = self.feat_extract_tester.max_seq_length + pad_diff
_a : Dict = self.feat_extract_tester.min_seq_length
_a : Optional[Any] = self.feat_extract_tester.batch_size
_a : Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_a : int = feat_extract.pad(_a ,padding=_a )
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(_a ,padding='longest' )
_a : Any = input_a[input_name]
_a : Optional[Any] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
_a : List[str] = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
_a : str = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' )[input_name]
_a : int = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,return_tensors='np' )
_a : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_a : Tuple = feat_extract.pad(_a ,pad_to_multiple_of=10 )
_a : List[str] = input_a[input_name]
_a : str = feat_extract.pad(_a ,padding='longest' ,pad_to_multiple_of=10 )
_a : Tuple = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a )
_a : Any = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a ,return_tensors='np' ,)
_a : Dict = input_a[input_name]
self.assertTrue(all(len(_a ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
_a : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_a ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_a : Any = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def __lowercase ( self : List[Any] ,_a : Optional[int]=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : List[str] ):
_a : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : List[str] ,_a : List[str] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : str = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Any = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_a : Union[str, Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=_a )
_a : str = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
_a : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to smallest with np
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=_a ,)
_a : Any = input_a[input_name]
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
_a : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces the padded batch to be shorter than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to middle
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a ,return_tensors='np' ,)
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a )
_a : Tuple = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
_a : Dict = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
        # since truncation forces the padded batch to be shorter than the longest input,
        # the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' ,truncation=_a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_a : Optional[Any] = 12
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,truncation=_a ,)
_a : Tuple = input_a[input_name]
_a : str = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,)
_a : List[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_a : List[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_a : Union[str, Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
self._check_truncation(numpify=_a )
def __lowercase ( self : str ):
'''simple docstring'''
self._check_truncation(numpify=_a )
@require_torch
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Optional[int] = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : Dict = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : Any = feat_extract.pad(_a ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = self.feat_extract_dict
_a : List[Any] = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Tuple = [len(_a ) for x in speech_inputs]
_a : int = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : str = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_dict
_a : Tuple = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : Dict = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = [len(_a ) for x in speech_inputs]
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Any = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = min(_a )
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,truncation=_a ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
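# Note: the two class attributes at the top of this mixin are left as None;
# concrete test classes are expected to assign the feature extractor class
# under test and a tester object exposing `prepare_feat_extract_dict()` and
# `prepare_inputs_for_common()`, which every test above relies on.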
| 271 | 1 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__lowerCAmelCase = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[int] = 'https://pypi.org/pypi/diffusers/json'
_a : int = json.loads(request.urlopen(__a ).read() )['releases'].keys()
return sorted(__a , key=lambda __a : version.Version(__a ) )
def UpperCAmelCase_ ():
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__a )
os.makedirs(__a , exist_ok=__a )
_a : str = Path(__a ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def UpperCAmelCase_ (__a : Union[str, os.PathLike] ):
"""simple docstring"""
init_hf_modules()
_a : Dict = Path(__a ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__a , exist_ok=__a )
_a : Optional[int] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
with open(__a , 'r' , encoding='utf-8' ) as f:
_a : int = f.read()
# Imports of the form `import .xxx`
    _a : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , __a , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , __a , flags=re.MULTILINE )
# Unique-ify
return list(set(__a ) )
def UpperCAmelCase_ (__a : Any ):
"""simple docstring"""
_a : Optional[int] = False
_a : Optional[int] = [module_file]
_a : List[str] = []
# Let's recurse through all relative imports
while not no_change:
_a : str = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__a ) )
_a : Union[str, Any] = Path(__a ).parent
_a : str = [str(module_path / m ) for m in new_imports]
_a : Tuple = [f for f in new_import_files if f not in all_relative_imports]
_a : Dict = [f"""{f}.py""" for f in new_import_files]
_a : List[str] = len(__a ) == 0
all_relative_imports.extend(__a )
return all_relative_imports
def UpperCAmelCase_ (__a : Tuple ):
"""simple docstring"""
with open(__a , 'r' , encoding='utf-8' ) as f:
_a : Dict = f.read()
# Imports of the form `import xxx`
    _a : Optional[int] = re.findall(r'^\s*import\s+(\S+)\s*$' , __a , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , __a , flags=re.MULTILINE )
# Only keep the top-level module
_a : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
_a : Optional[int] = list(set(__a ) )
_a : List[str] = []
for imp in imports:
try:
importlib.import_module(__a )
except ImportError:
missing_packages.append(__a )
if len(__a ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
f"""{', '.join(__a )}. Run `pip install {' '.join(__a )}`""" )
return get_relative_imports(__a )
def UpperCAmelCase_ (__a : Any , __a : str ):
"""simple docstring"""
_a : Any = module_path.replace(os.path.sep , '.' )
_a : Union[str, Any] = importlib.import_module(__a )
if class_name is None:
return find_pipeline_class(__a )
return getattr(__a , __a )
def UpperCAmelCase_ (__a : Optional[int] ):
"""simple docstring"""
from ..pipelines import DiffusionPipeline
_a : List[str] = dict(inspect.getmembers(__a , inspect.isclass ) )
_a : str = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , __a )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
f"""Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"""
f""" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"""
f""" {loaded_module}.""" )
_a : Any = cls
return pipeline_class
def UpperCAmelCase_ (__a : Union[str, os.PathLike] , __a : str , __a : Optional[Union[str, os.PathLike]] = None , __a : bool = False , __a : bool = False , __a : Optional[Dict[str, str]] = None , __a : Optional[Union[bool, str]] = None , __a : Optional[str] = None , __a : bool = False , ):
"""simple docstring"""
_a : str = str(__a )
_a : Optional[Any] = os.path.join(__a , __a )
if os.path.isfile(__a ):
_a : Tuple = module_file_or_url
_a : Optional[Any] = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
_a : int = get_diffusers_versions()
# cut ".dev0"
_a : Any = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
_a : Any = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
_a : Any = f"""v{revision}"""
elif revision == "main":
_a : Optional[int] = revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {', '.join(available_versions + ['main'] )}.""" )
# community pipeline on GitHub
_a : Tuple = COMMUNITY_PIPELINES_URL.format(revision=__a , pipeline=__a )
try:
_a : Any = cached_download(
__a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , )
_a : List[Any] = 'git'
_a : Any = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
_a : Optional[Any] = hf_hub_download(
__a , __a , cache_dir=__a , force_download=__a , proxies=__a , resume_download=__a , local_files_only=__a , use_auth_token=__a , )
_a : List[Any] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
_a : Optional[int] = check_imports(__a )
# Now we move the module inside our cached dynamic modules.
_a : Optional[Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(__a )
_a : Any = Path(__a ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(__a , submodule_path / module_file )
for module_needed in modules_needed:
_a : Dict = f"""{module_needed}.py"""
shutil.copy(os.path.join(__a , __a ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(__a , __a ):
_a : Optional[Any] = use_auth_token
elif use_auth_token is True:
_a : List[Any] = HfFolder.get_token()
else:
_a : Dict = None
_a : int = model_info(__a , revision=__a , token=__a ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
_a : Optional[int] = submodule_path / commit_hash
_a : str = full_submodule + os.path.sep + commit_hash
create_dynamic_module(__a )
if not (submodule_path / module_file).exists():
shutil.copy(__a , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
__a , f"""{module_needed}.py""" , cache_dir=__a , force_download=__a , resume_download=__a , proxies=__a , use_auth_token=__a , revision=__a , local_files_only=__a , )
return os.path.join(__a , __a )
def UpperCAmelCase_ (__a : Union[str, os.PathLike] , __a : str , __a : Optional[str] = None , __a : Optional[Union[str, os.PathLike]] = None , __a : bool = False , __a : bool = False , __a : Optional[Dict[str, str]] = None , __a : Optional[Union[bool, str]] = None , __a : Optional[str] = None , __a : bool = False , **__a : str , ):
"""simple docstring"""
_a : Dict = get_cached_module_file(
__a , __a , cache_dir=__a , force_download=__a , resume_download=__a , proxies=__a , use_auth_token=__a , revision=__a , local_files_only=__a , )
return get_class_in_module(__a , final_module.replace('.py' , '' ) )
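# Usage sketch for the loader above; the public name
# `get_class_from_dynamic_module` for the final function is a hedged reading
# of this obfuscated sample, and the community pipeline name is illustrative:
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
#   )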
| 271 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : UNetaDModel
__UpperCAmelCase : KarrasVeScheduler
def __init__( self : Union[str, Any] ,_a : UNetaDModel ,_a : KarrasVeScheduler ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a )
@torch.no_grad()
def __call__( self : List[Any] ,_a : int = 1 ,_a : int = 50 ,_a : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,**_a : List[Any] ,):
'''simple docstring'''
_a : Any = self.unet.config.sample_size
_a : Optional[int] = (batch_size, 3, img_size, img_size)
_a : Dict = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_a : Dict = randn_tensor(_a ,generator=_a ,device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_a )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
_a : Optional[int] = self.scheduler.schedule[t]
_a : List[str] = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_a, _a : List[Any] = self.scheduler.add_noise_to_input(_a ,_a ,generator=_a )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_a : Optional[int] = (sigma_hat / 2) * model((sample_hat + 1) / 2 ,sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_a : Tuple = self.scheduler.step(_a ,_a ,_a ,_a )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_a : Optional[int] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 ,sigma_prev / 2 ).sample
_a : Optional[Any] = self.scheduler.step_correct(
_a ,_a ,_a ,_a ,step_output.prev_sample ,step_output['derivative'] ,)
_a : Dict = step_output.prev_sample
_a : Tuple = (sample / 2 + 0.5).clamp(0 ,1 )
_a : Optional[Any] = sample.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
_a : List[str] = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
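# Usage sketch; `KarrasVePipeline` is an assumed name for the obfuscated
# pipeline class above (`UNetaDModel` mirrors diffusers' UNet2DModel), and
# the checkpoint id is illustrative:
#
#   unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]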
| 271 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
__lowerCAmelCase = {
"""google/electra-small-generator""": 5_1_2,
"""google/electra-base-generator""": 5_1_2,
"""google/electra-large-generator""": 5_1_2,
"""google/electra-small-discriminator""": 5_1_2,
"""google/electra-base-discriminator""": 5_1_2,
"""google/electra-large-discriminator""": 5_1_2,
}
__lowerCAmelCase = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
__UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Optional[int] = ElectraTokenizer
def __init__( self : Optional[Any] ,_a : str=None ,_a : List[Any]=None ,_a : List[str]=True ,_a : Tuple="[UNK]" ,_a : List[Any]="[SEP]" ,_a : List[Any]="[PAD]" ,_a : int="[CLS]" ,_a : List[str]="[MASK]" ,_a : List[Any]=True ,_a : List[str]=None ,**_a : int ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
_a : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' ,_a ) != do_lower_case
or normalizer_state.get('strip_accents' ,_a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' ,_a ) != tokenize_chinese_chars
):
_a : Tuple = getattr(_a ,normalizer_state.pop('type' ) )
_a : Tuple = do_lower_case
_a : int = strip_accents
_a : Dict = tokenize_chinese_chars
_a : List[Any] = normalizer_class(**_a )
_a : int = do_lower_case
def __lowercase ( self : Optional[int] ,_a : str ,_a : str=None ):
'''simple docstring'''
_a : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowercase ( self : str ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
_a : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
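# Usage sketch; `ElectraTokenizerFast` is an assumed name for the obfuscated
# class above, while the model id comes from the pretrained maps in this file:
#
#   tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   enc = tok("ELECTRA swaps masked language modeling for replaced-token detection.")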
| 271 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__lowerCAmelCase = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Optional[int] = 'https://pypi.org/pypi/diffusers/json'
_a : int = json.loads(request.urlopen(__a ).read() )['releases'].keys()
return sorted(__a , key=lambda __a : version.Version(__a ) )
def UpperCAmelCase_ ():
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(__a )
os.makedirs(__a , exist_ok=__a )
_a : str = Path(__a ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def UpperCAmelCase_ (__a : Union[str, os.PathLike] ):
"""simple docstring"""
init_hf_modules()
_a : Dict = Path(__a ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(__a , exist_ok=__a )
_a : Optional[int] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
with open(__a , 'r' , encoding='utf-8' ) as f:
_a : int = f.read()
# Imports of the form `import .xxx`
    _a : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , __a , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , __a , flags=re.MULTILINE )
# Unique-ify
return list(set(__a ) )
def UpperCAmelCase_ (__a : Any ):
"""simple docstring"""
_a : Optional[int] = False
_a : Optional[int] = [module_file]
_a : List[str] = []
# Let's recurse through all relative imports
while not no_change:
_a : str = []
for f in files_to_check:
new_imports.extend(get_relative_imports(__a ) )
_a : Union[str, Any] = Path(__a ).parent
_a : str = [str(module_path / m ) for m in new_imports]
_a : Tuple = [f for f in new_import_files if f not in all_relative_imports]
_a : Dict = [f"""{f}.py""" for f in new_import_files]
_a : List[str] = len(__a ) == 0
all_relative_imports.extend(__a )
return all_relative_imports
def UpperCAmelCase_ (__a : Tuple ):
"""simple docstring"""
with open(__a , 'r' , encoding='utf-8' ) as f:
_a : Dict = f.read()
# Imports of the form `import xxx`
    _a : Optional[int] = re.findall(r'^\s*import\s+(\S+)\s*$' , __a , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , __a , flags=re.MULTILINE )
# Only keep the top-level module
_a : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
_a : Optional[int] = list(set(__a ) )
_a : List[str] = []
for imp in imports:
try:
importlib.import_module(__a )
except ImportError:
missing_packages.append(__a )
if len(__a ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
f"""{', '.join(__a )}. Run `pip install {' '.join(__a )}`""" )
return get_relative_imports(__a )
def UpperCAmelCase_ (__a : Any , __a : str ):
"""simple docstring"""
_a : Any = module_path.replace(os.path.sep , '.' )
_a : Union[str, Any] = importlib.import_module(__a )
if class_name is None:
return find_pipeline_class(__a )
return getattr(__a , __a )
def find_pipeline_class(loaded_module):
    """Retrieve the unique pipeline class defined in `loaded_module` that inherits from `DiffusionPipeline`."""
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls
    return pipeline_class

def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False,
    resume_download: bool = False, proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None,
    local_files_only: bool = False,
):
    """Resolve `module_file` (local path, GitHub community pipeline, or Hub repo) into the dynamic-module cache."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision != "main":
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies,
                resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir,
                force_download=force_download, proxies=proxies, resume_download=resume_download,
                local_files_only=local_files_only, use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir,
                    force_download=force_download, resume_download=resume_download, proxies=proxies,
                    use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)

def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike], module_file: str, class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False,
    resume_download: bool = False, proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None,
    local_files_only: bool = False, **kwargs,
):
    """Fetch `module_file` with `get_cached_module_file` and return `class_name` (or the unique pipeline class) from it."""
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download,
        resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token,
        revision=revision, local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
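
# --- Usage sketch (added for illustration; not part of the original module) ---
# How the helpers above compose: `get_cached_module_file` materializes the
# remote file under HF_MODULES_CACHE, `check_imports` validates its
# dependencies, and `get_class_in_module` imports the class. The repo id below
# is a hypothetical placeholder.
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "some-user/some-custom-pipeline",   # hypothetical Hub repo id
#       module_file="pipeline.py",
#       class_name=None,  # None -> locate the unique DiffusionPipeline subclass
#   )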
| 271 | 1 |
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed, in m/s, of a gas at `temperature` (K) with `molar_mass` (kg/mol)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example: nitrogen (N2) has a molar mass of about 0.028 kg/mol; the
    # original example passed 28, which yields a physically wrong speed.
    temperature = 300
    molar_mass = 0.028
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 271 |
'''simple docstring'''
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Evaluate the interpolating polynomial through (x_points, y_points) at x0 with Neville's scheme."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
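    # quick demonstration (illustration only): four samples of y = x**2.
    # Evaluating at x0 = 3 recovers 9.0, and the full Neville table is returned too.
    value, table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 3)
    print(value)  # 9.0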
| 271 | 1 |
'''simple docstring'''
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Linked-list-backed LIFO stack."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
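    # quick LIFO demonstration (illustration only)
    stack = LinkedStack[int]()
    stack.push(1)
    stack.push(2)
    assert str(stack) == "2->1"
    assert stack.pop() == 2
    assert stack.peek() == 1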
| 271 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
"""simple docstring"""
mod_file = inspect.getfile(accelerate.test_utils)
test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_cli.py'])
base_cmd = ['accelerate', 'launch']
config_folder = Path.home() / '.cache/huggingface/accelerate'
config_file = 'default_config.yaml'
config_path = config_folder / config_file
changed_path = config_folder / '_default_config.yaml'
test_config_path = Path('tests/test_configs')
@classmethod
def __lowercase ( cls : int ):
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __lowercase ( cls : List[Any] ):
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] ,env=os.environ.copy() )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
for config in sorted(self.test_config_path.glob('**/*.yaml' ) ):
with self.subTest(config_file=_a ):
execute_subprocess_async(
self.base_cmd + ['--config_file', str(_a ), self.test_file_path] ,env=os.environ.copy() )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
execute_subprocess_async(['accelerate', 'test'] ,env=os.environ.copy() )
class TpuConfigTester(unittest.TestCase):
"""simple docstring"""
tpu_name = 'test-tpu'
tpu_zone = 'us-central1-a'
command = 'ls'
cmd = ['accelerate', 'tpu-config']
base_output = 'cd /usr/share'
command_file = 'tests/test_samples/test_command_file.sh'
gcloud = 'Running gcloud compute tpus tpu-vm ssh'
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[Any] = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Any = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] ,return_stdout=_a )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : int ):
'''simple docstring'''
_a : Optional[Any] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" ,_a ,)
def __lowercase ( self : str ):
'''simple docstring'''
_a : List[str] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" ,_a ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Any = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Union[str, Any] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Optional[int] = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] ,return_stdout=_a ,)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" ,_a ,)
| 271 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
"""Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""
feature_extractor_class = 'Speech2TextFeatureExtractor'
tokenizer_class = 'Speech2TextTokenizer'
def __init__( self : Tuple ,_a : List[str] ,_a : Any ):
'''simple docstring'''
super().__init__(_a ,_a )
_a : str = self.feature_extractor
_a : List[str] = False
def __call__( self : List[str] ,*_a : str ,**_a : List[Any] ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_a ,**_a )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
_a : Optional[int] = kwargs.pop('raw_speech' )
else:
_a : Optional[Any] = kwargs.pop('audio' ,_a )
_a : Any = kwargs.pop('sampling_rate' ,_a )
_a : str = kwargs.pop('text' ,_a )
if len(_a ) > 0:
_a : str = args[0]
_a : Dict = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
_a : Optional[Any] = self.feature_extractor(_a ,*_a ,sampling_rate=_a ,**_a )
if text is not None:
_a : Any = self.tokenizer(_a ,**_a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_a : int = encodings['input_ids']
return inputs
def __lowercase ( self : List[str] ,*_a : Optional[Any] ,**_a : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def __lowercase ( self : List[Any] ,*_a : Optional[Any] ,**_a : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@contextmanager
def __lowercase ( self : List[str] ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
_a : int = True
_a : Dict = self.tokenizer
yield
_a : Any = self.feature_extractor
_a : Any = False
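# --- Usage sketch (added for illustration; the checkpoint id is an assumption):
#   processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#   inputs = processor(audio=waveform, sampling_rate=16_000)
#   labels = processor(text="a transcript").input_ids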
| 271 |
| 271 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """Return True if every tensor in `tensor_list` has the same shape."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase : List[Any] = StableDiffusionLatentUpscalePipeline
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
__UpperCAmelCase : Optional[int] = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
__UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__UpperCAmelCase : Dict = frozenset([] )
__UpperCAmelCase : Tuple = True
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Dict = 1
_a : int = 4
_a : Union[str, Any] = (16, 16)
_a : List[str] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_a )
return image
def __lowercase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Optional[int] = UNetaDConditionModel(
act_fn='gelu' ,attention_head_dim=8 ,norm_num_groups=_a ,block_out_channels=[32, 32, 64, 64] ,time_cond_proj_dim=160 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=(
'KDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
'KCrossAttnDownBlock2D',
) ,in_channels=8 ,mid_block_type=_a ,only_cross_attention=_a ,out_channels=5 ,resnet_time_scale_shift='scale_shift' ,time_embedding_type='fourier' ,timestep_post_act='gelu' ,up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') ,)
_a : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
'DownEncoderBlock2D',
] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,)
_a : Optional[Any] = EulerDiscreteScheduler(prediction_type='sample' )
_a : List[Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='quick_gelu' ,projection_dim=512 ,)
_a : Tuple = CLIPTextModel(_a )
_a : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_a : List[Any] = {
'unet': model.eval(),
'vae': vae.eval(),
'scheduler': scheduler,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def __lowercase ( self : List[str] ,_a : int ,_a : List[str]=0 ):
'''simple docstring'''
if str(_a ).startswith('mps' ):
_a : List[Any] = torch.manual_seed(_a )
else:
_a : Dict = torch.Generator(device=_a ).manual_seed(_a )
_a : int = {
'prompt': 'A painting of a squirrel eating a burger',
'image': self.dummy_image.cpu(),
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowercase ( self : int ):
'''simple docstring'''
_a : str = 'cpu'
_a : List[Any] = self.get_dummy_components()
_a : Any = self.pipeline_class(**_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : str = self.get_dummy_inputs(_a )
_a : Optional[int] = pipe(**_a ).images
_a : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 256, 256, 3) )
_a : Union[str, Any] = np.array(
[0.4722_2412, 0.4192_1633, 0.4471_7434, 0.4687_4192, 0.4258_8258, 0.4615_0726, 0.467_7534, 0.4558_3832, 0.4857_9055] )
_a : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_a ,1E-3 )
def __lowercase ( self : Tuple ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7E-3 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 )
def __lowercase ( self : int ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3E-3 )
def __lowercase ( self : str ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[Any] = [
'DDIMScheduler',
'DDPMScheduler',
'PNDMScheduler',
'HeunDiscreteScheduler',
'EulerAncestralDiscreteScheduler',
'KDPM2DiscreteScheduler',
'KDPM2AncestralDiscreteScheduler',
'DPMSolverSDEScheduler',
]
_a : Optional[int] = self.get_dummy_components()
_a : Optional[int] = self.pipeline_class(**_a )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = self.get_dummy_inputs(_a )
_a : Tuple = 2
_a : List[str] = []
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# schedulers without sigma support are skipped for this pipeline
continue
_a : int = getattr(_a ,scheduler_enum.name )
_a : Union[str, Any] = scheduler_cls.from_config(pipe.scheduler.config )
_a : List[str] = pipe(**_a )[0]
outputs.append(_a )
assert check_same_shape(_a )
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Any = torch.manual_seed(33 )
_a : List[Any] = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' ,torch_dtype=torch.floataa )
pipe.to('cuda' )
_a : Optional[Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.floataa )
upscaler.to('cuda' )
_a : Tuple = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
_a : Union[str, Any] = pipe(_a ,generator=_a ,output_type='latent' ).images
_a : List[Any] = upscaler(
prompt=_a ,image=_a ,num_inference_steps=20 ,guidance_scale=0 ,generator=_a ,output_type='np' ,).images[0]
_a : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy' )
assert np.abs((expected_image - image).mean() ) < 5E-2
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Optional[int] = torch.manual_seed(33 )
_a : Union[str, Any] = StableDiffusionLatentUpscalePipeline.from_pretrained(
'stabilityai/sd-x2-latent-upscaler' ,torch_dtype=torch.floataa )
upscaler.to('cuda' )
_a : Tuple = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
_a : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png' )
_a : List[str] = upscaler(
prompt=_a ,image=_a ,num_inference_steps=20 ,guidance_scale=0 ,generator=_a ,output_type='np' ,).images[0]
_a : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy' )
assert np.abs((expected_image - image).max() ) < 5E-2
| 271 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
"""simple docstring"""
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : int = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : str = self.dummy_uncond_unet
_a : int = PNDMScheduler()
_a : str = PNDMPipeline(unet=_a ,scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
_a : Optional[int] = torch.manual_seed(0 )
_a : Optional[Any] = pndm(generator=_a ,num_inference_steps=20 ,output_type='numpy' ).images
_a : List[str] = torch.manual_seed(0 )
_a : Any = pndm(generator=_a ,num_inference_steps=20 ,output_type='numpy' ,return_dict=_a )[0]
_a : List[Any] = image[0, -3:, -3:, -1]
_a : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : List[Any] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[str] = 'google/ddpm-cifar10-32'
_a : str = UNetaDModel.from_pretrained(_a )
_a : Union[str, Any] = PNDMScheduler()
_a : Tuple = PNDMPipeline(unet=_a ,scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
_a : str = torch.manual_seed(0 )
_a : Optional[Any] = pndm(generator=_a ,output_type='numpy' ).images
_a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : Tuple = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
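# --- Usage sketch (illustration only): the minimal sampling loop the slow test
# runs, with a hypothetical output path:
#   unet = UNetaDModel.from_pretrained("google/ddpm-cifar10-32")
#   pndm = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
#   image = pndm(num_inference_steps=50, output_type="pil").images[0]
#   image.save("pndm_sample.png")  # hypothetical path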
| 271 | 1 |
'''simple docstring'''
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
__lowerCAmelCase = """bert-base-cased"""
__lowerCAmelCase = """fp16"""
__lowerCAmelCase = """bf16"""
__lowerCAmelCase = [FPaa, BFaa]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
"""simple docstring"""
def __lowercase ( self : Dict ):
'''simple docstring'''
super().setUp()
_a : int = dict(
ACCELERATE_USE_FSDP='true' ,MASTER_ADDR='localhost' ,MASTER_PORT='10999' ,RANK='0' ,LOCAL_RANK='0' ,WORLD_SIZE='1' ,)
def __lowercase ( self : Any ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy
for i, strategy in enumerate(_a ):
_a : Union[str, Any] = self.dist_env.copy()
_a : Optional[Any] = F"""{i + 1}"""
_a : str = strategy
with mockenv_context(**_a ):
_a : Optional[int] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) )
def __lowercase ( self : str ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch
for i, prefetch_policy in enumerate(_a ):
_a : Tuple = self.dist_env.copy()
_a : List[str] = prefetch_policy
with mockenv_context(**_a ):
_a : Optional[int] = FullyShardedDataParallelPlugin()
if prefetch_policy == "NO_PREFETCH":
self.assertIsNone(fsdp_plugin.backward_prefetch )
else:
self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
for i, state_dict_type in enumerate(_a ):
_a : List[Any] = self.dist_env.copy()
_a : str = state_dict_type
with mockenv_context(**_a ):
_a : Optional[Any] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) )
if state_dict_type == "FULL_STATE_DICT":
self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Dict = AutoModel.from_pretrained(_a )
for policy in FSDP_AUTO_WRAP_POLICY:
_a : Optional[int] = self.dist_env.copy()
_a : Union[str, Any] = policy
if policy == "TRANSFORMER_BASED_WRAP":
_a : int = 'BertLayer'
elif policy == "SIZE_BASED_WRAP":
_a : Optional[int] = '2000'
with mockenv_context(**_a ):
_a : Union[str, Any] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_a )
if policy == "NO_WRAP":
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
else:
self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
_a : Tuple = self.dist_env.copy()
_a : List[str] = 'TRANSFORMER_BASED_WRAP'
_a : Optional[Any] = 'T5Layer'
with mockenv_context(**_a ):
_a : int = FullyShardedDataParallelPlugin()
with self.assertRaises(_a ) as cm:
fsdp_plugin.set_auto_wrap_policy(_a )
self.assertTrue('Could not find the transformer layer class to wrap in the model.' in str(cm.exception ) )
_a : Tuple = self.dist_env.copy()
_a : Optional[int] = 'SIZE_BASED_WRAP'
_a : str = '0'
with mockenv_context(**_a ):
_a : Optional[int] = FullyShardedDataParallelPlugin()
fsdp_plugin.set_auto_wrap_policy(_a )
self.assertIsNone(fsdp_plugin.auto_wrap_policy )
def __lowercase ( self : int ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
for mp_dtype in dtypes:
_a : Any = self.dist_env.copy()
_a : List[str] = mp_dtype
with mockenv_context(**_a ):
_a : Dict = Accelerator()
if mp_dtype == "fp16":
_a : List[str] = torch.floataa
elif mp_dtype == "bf16":
_a : Any = torch.bfloataa
_a : Any = MixedPrecision(param_dtype=_a ,reduce_dtype=_a ,buffer_dtype=_a )
self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,_a )
if mp_dtype == FPaa:
self.assertTrue(isinstance(accelerator.scaler ,_a ) )
elif mp_dtype == BFaa:
self.assertIsNone(accelerator.scaler )
AcceleratorState._reset_state(_a )
def __lowercase ( self : str ):
'''simple docstring'''
from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload
for flag in [True, False]:
_a : List[str] = self.dist_env.copy()
_a : Tuple = str(_a ).lower()
with mockenv_context(**_a ):
_a : List[str] = FullyShardedDataParallelPlugin()
self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=_a ) )
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
"""simple docstring"""
def __lowercase ( self : List[str] ):
'''simple docstring'''
super().setUp()
_a : int = 0.82
_a : Union[str, Any] = [
'fsdp_shard_grad_op_transformer_based_wrap',
'fsdp_full_shard_transformer_based_wrap',
]
_a : Dict = {
'multi_gpu_fp16': 3200,
'fsdp_shard_grad_op_transformer_based_wrap_fp16': 2000,
'fsdp_full_shard_transformer_based_wrap_fp16': 1900,
# Disabling below test as it overwhelms the RAM memory usage
# on CI self-hosted runner leading to tests getting killed.
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
}
_a : Tuple = 160
_a : Dict = 160
_a : List[str] = inspect.getfile(accelerate.test_utils )
_a : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps'] )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[str] = os.path.join(self.test_scripts_folder ,'test_performance.py' )
_a : Any = ['accelerate', 'launch', '--num_processes=2', '--num_machines=1', '--machine_rank=0', '--use_fsdp']
for config in self.performance_configs:
_a : int = cmd.copy()
for i, strategy in enumerate(_a ):
if strategy.lower() in config:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "fp32" in config:
cmd_config.append('--mixed_precision=no' )
else:
cmd_config.append('--mixed_precision=fp16' )
if "cpu_offload" in config:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a ,env=os.environ.copy() )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Optional[Any] = os.path.join(self.test_scripts_folder ,'test_checkpointing.py' )
_a : Union[str, Any] = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
'--use_fsdp',
'--mixed_precision=fp16',
'--fsdp_transformer_layer_cls_to_wrap=BertLayer',
]
for i, strategy in enumerate(_a ):
_a : int = cmd.copy()
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
if strategy != "FULL_SHARD":
continue
_a : int = len(_a )
for state_dict_type in FSDP_STATE_DICT_TYPE:
_a : str = cmd_config[:state_dict_config_index]
cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
'--partial_train_epoch=1',
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a ,env=os.environ.copy() )
_a : Optional[int] = cmd_config[:-1]
_a : Tuple = os.path.join(self.tmpdir ,'epoch_0' )
cmd_config.extend(
[
F"""--resume_from_checkpoint={resume_from_checkpoint}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a ,env=os.environ.copy() )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : int = os.path.join(self.test_scripts_folder ,'test_peak_memory_usage.py' )
_a : Optional[int] = [
'accelerate',
'launch',
'--num_processes=2',
'--num_machines=1',
'--machine_rank=0',
]
for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
_a : Any = cmd.copy()
if "fp16" in spec:
cmd_config.extend(['--mixed_precision=fp16'] )
else:
cmd_config.extend(['--mixed_precision=no'] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(['--use_fsdp'] )
for i, strategy in enumerate(_a ):
if strategy.lower() in spec:
cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append('--fsdp_offload_params=True' )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append('--fsdp_transformer_layer_cls_to_wrap=BertLayer' )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append('--fsdp_min_num_params=2000' )
cmd_config.extend(
[
self.test_file_path,
F"""--output_dir={self.tmpdir}""",
F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
F"""--n_train={self.n_train}""",
F"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(_a ,env=os.environ.copy() )
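# --- Illustration (not part of the tests): a representative launcher command the
# suite builds, written out as a shell invocation with placeholder values:
#   accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#       --use_fsdp --mixed_precision=fp16 --fsdp_sharding_strategy=1 \
#       --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#       --fsdp_transformer_layer_cls_to_wrap=BertLayer test_performance.py \
#       --output_dir=/tmp/out --performance_lower_bound=0.82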
| 271 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : str ,_a : Path ,_a : Union[str, None] = None ,_a : Union[List[str], None] = None ,_a : Union[str, List[str], None] = None ,_a : bool = True ,):
'''simple docstring'''
_a : Optional[int] = [file for file in os.listdir(_a ) if os.path.isfile(os.path.join(_a ,_a ) )]
if identifier is not None:
_a : List[str] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_a ,_a ):
for n_ in n_identifier:
_a : Tuple = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : List[str] = ignore_files or []
ignore_files.append('__init__.py' )
_a : Tuple = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_a )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : List[str] = getattr(_a ,_a )
_a : int = doctest.DocTestSuite(_a )
_a : Any = unittest.TextTestRunner().run(_a )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : Union[str, Any] = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : int = Path('src/transformers' )
_a : List[Any] = 'modeling'
_a : Optional[Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_a ,identifier=_a ,ignore_files=_a )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'tokenization'
self.analyze_directory(_a ,identifier=_a )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Dict = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_a ,identifier=_a )
def __lowercase ( self : Any ):
'''simple docstring'''
_a : Tuple = Path('src/transformers' )
_a : List[Any] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_a ,n_identifier=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_a ,ignore_files=_a ,only_modules=_a )
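# --- Illustration (not part of the test): the two doctest entry points the
# helper above dispatches to, shown standalone with placeholder targets:
#   suite = doctest.DocTestSuite(some_module)                 # only_modules=True
#   result = doctest.testfile("../src/foo.py", optionflags=doctest.ELLIPSIS)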
| 271 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Union[str, Any] ,_a : int ,_a : List[Any]=7 ,_a : Any=3 ,_a : Optional[Any]=18 ,_a : Any=30 ,_a : List[str]=400 ,_a : Dict=True ,_a : int=None ,_a : Optional[Any]=True ,_a : str=None ,_a : Optional[int]=True ,_a : Any=[0.4814_5466, 0.457_8275, 0.4082_1073] ,_a : str=[0.2686_2954, 0.2613_0258, 0.2757_7711] ,_a : Optional[int]=True ,):
'''simple docstring'''
_a : Tuple = size if size is not None else {'height': 224, 'width': 224}
_a : Any = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_a : Optional[Any] = parent
_a : List[str] = batch_size
_a : Union[str, Any] = num_channels
_a : str = image_size
_a : Dict = min_resolution
_a : Union[str, Any] = max_resolution
_a : List[Any] = do_resize
_a : Optional[int] = size
_a : int = do_center_crop
_a : Any = crop_size
_a : Optional[int] = do_normalize
_a : Union[str, Any] = image_mean
_a : Union[str, Any] = image_std
_a : Any = do_convert_rgb
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __lowercase ( self : Optional[int] ,_a : Optional[Any]=False ,_a : Union[str, Any]=False ,_a : int=False ):
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_a : Dict = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) )
else:
_a : int = []
for i in range(self.batch_size ):
_a, _a : Union[str, Any] = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 )
image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_a : List[Any] = [Image.fromarray(np.moveaxis(_a ,0 ,-1 ) ) for x in image_inputs]
if torchify:
_a : Any = [torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = ChineseCLIPImageProcessingTester(self ,do_center_crop=True )
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a ,'do_resize' ) )
self.assertTrue(hasattr(_a ,'size' ) )
self.assertTrue(hasattr(_a ,'do_center_crop' ) )
self.assertTrue(hasattr(_a ,'center_crop' ) )
self.assertTrue(hasattr(_a ,'do_normalize' ) )
self.assertTrue(hasattr(_a ,'image_mean' ) )
self.assertTrue(hasattr(_a ,'image_std' ) )
self.assertTrue(hasattr(_a ,'do_convert_rgb' ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 224, 'width': 224} )
self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} )
_a : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : Dict = self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a ,Image.Image )
# Test not batched input
_a : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_a : str = image_processing(_a ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a : Optional[int] = self.image_processor_tester.prepare_inputs(equal_resolution=_a ,numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,np.ndarray )
# Test not batched input
_a : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_a : Any = image_processing(_a ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a : int = self.image_processor_tester.prepare_inputs(equal_resolution=_a ,torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,torch.Tensor )
# Test not batched input
_a : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_a : Dict = image_processing(_a ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ChineseCLIPImageProcessor if is_vision_available() else None
def __lowercase ( self : str ):
'''simple docstring'''
_a : Union[str, Any] = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=True )
_a : Union[str, Any] = 3
@property
def __lowercase ( self : str ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a ,'do_resize' ) )
self.assertTrue(hasattr(_a ,'size' ) )
self.assertTrue(hasattr(_a ,'do_center_crop' ) )
self.assertTrue(hasattr(_a ,'center_crop' ) )
self.assertTrue(hasattr(_a ,'do_normalize' ) )
self.assertTrue(hasattr(_a ,'image_mean' ) )
self.assertTrue(hasattr(_a ,'image_std' ) )
self.assertTrue(hasattr(_a ,'do_convert_rgb' ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
pass
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : List[str] = self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a ,Image.Image )
# Test not batched input
_a : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
_a : Union[str, Any] = image_processing(_a ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
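# --- Usage sketch (added for illustration; the checkpoint id is an assumption):
#   image_processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   pixel_values = image_processor(images=pil_image, return_tensors="pt").pixel_values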
| 271 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy `weight` (and optionally `bias`) into `torch_layer`, checking shapes first."""
    assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match"""
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match"""
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
"""simple docstring"""
_a : Tuple = np.asarray(weights[0] )
_a : Union[str, Any] = np.asarray(weights[1] )
_a : Dict = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
"""simple docstring"""
_a : Dict = np.asarray(weights[0] )
_a : Union[str, Any] = np.asarray(weights[1] )
_a : str = np.asarray(weights[2] )
_a : int = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__a ).transpose(1 , 2 ).contiguous().view(-1 , __a ) , )
set_param(
torch_layer.output.dense , torch.tensor(__a ).view(-1 , __a ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
"""simple docstring"""
_a : List[str] = weights[0][0][0]
_a : List[Any] = np.asarray(layer_norm_a[0] )
_a : List[str] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# lsh weights + output
_a : List[str] = weights[0][1]
if len(__a ) < 4:
set_layer_weights_in_torch_lsh(__a , torch_block.attention , __a )
else:
set_layer_weights_in_torch_local(__a , torch_block.attention , __a )
# intermediate weighs
_a : Optional[Any] = weights[2][0][1][2]
# Chunked Feed Forward
if len(__a ) == 4:
_a : Union[str, Any] = intermediate_weights[2]
# layernorm 2
_a : Any = np.asarray(intermediate_weights[0][0] )
_a : List[Any] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# intermediate dense
_a : Any = np.asarray(intermediate_weights[1][0] )
_a : Any = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
# intermediate out
_a : Optional[int] = np.asarray(intermediate_weights[4][0] )
_a : int = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
"""simple docstring"""
_a : Optional[int] = torch_model.reformer
# word embeds
_a : Tuple = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__a ) , )
if isinstance(weights[3] , __a ):
_a : Any = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_a : List[Any] = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f"""{position_embeddings[emb_idx]} emb does not match"""
_a : Any = nn.Parameter(torch.tensor(__a ) )
_a : List[str] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__a ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_a : Tuple = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__a , __a , __a )
# output layer norm
_a : Optional[Any] = np.asarray(weights[7][0] )
_a : int = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__a ) , torch.tensor(__a ) , )
# output embeddings
_a : List[str] = np.asarray(weights[9][0] )
_a : int = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__a ).transpose(0 , 1 ).contiguous() , torch.tensor(__a ) , )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a Reformer model from `config_file`, load trax weights from `trax_model_pkl_path`, save to `pytorch_dump_path`."""
    config = ReformerConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = ReformerModelWithLMHead(config)
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--trax_model_pkl_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained Reformer model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
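# --- Illustration: invoking the converter from a shell (script and file names
# are placeholders):
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path model.pkl \
#       --config_file reformer_config.json \
#       --pytorch_dump_path pytorch_model.bin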
| 271 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_clap"] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
__lowerCAmelCase = ["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
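# --- Illustration (not part of the module): with the _LazyModule indirection,
# a downstream import such as
#   from transformers.models.clap import ClapModel
# only triggers the torch-backed submodule import on first attribute access.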
| 271 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Any = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Union[str, Any] = VQModel(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=3 ,)
return model
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Any = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,)
return CLIPTextModel(_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : Dict = self.dummy_uncond_unet
_a : List[Any] = DDIMScheduler()
_a : List[Any] = self.dummy_vq_model
_a : str = LDMPipeline(unet=_a ,vqvae=_a ,scheduler=_a )
ldm.to(_a )
ldm.set_progress_bar_config(disable=_a )
_a : List[str] = torch.manual_seed(0 )
_a : List[str] = ldm(generator=_a ,num_inference_steps=2 ,output_type='numpy' ).images
_a : List[str] = torch.manual_seed(0 )
_a : Union[str, Any] = ldm(generator=_a ,num_inference_steps=2 ,output_type='numpy' ,return_dict=_a )[0]
_a : Tuple = image[0, -3:, -3:, -1]
_a : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : int = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
_a : Any = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : List[str] = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(_a )
ldm.set_progress_bar_config(disable=_a )
_a : Optional[int] = torch.manual_seed(0 )
_a : Dict = ldm(generator=_a ,num_inference_steps=5 ,output_type='numpy' ).images
_a : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Optional[int] = np.array([0.4399, 0.4_4975, 0.4_6825, 0.474, 0.4359, 0.4581, 0.4_5095, 0.4341, 0.4447] )
_a : int = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
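# Standalone hedged sketch of the slow-test pipeline above (step count illustrative):
#   from diffusers import LDMPipeline
#   ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
#   image = ldm(num_inference_steps=50, output_type='numpy').images[0]  # (256, 256, 3)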
| 271 | 1 |
'''simple docstring'''
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : torch.FloatTensor
__UpperCAmelCase : Optional[torch.FloatTensor] = None
def UpperCAmelCase_ (__a : Tuple , __a : Dict=0.999 , __a : List[Any]="cosine" , ):
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__a : List[str] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__a : Any ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
_a : Union[str, Any] = []
for i in range(__a ):
_a : Any = i / num_diffusion_timesteps
_a : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__a ) / alpha_bar_fn(__a ) , __a ) )
return torch.tensor(__a , dtype=torch.floataa )
class UpperCAmelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Dict = 1
@register_to_config
def __init__( self : int ,_a : int = 1000 ,_a : float = 0.0001 ,_a : float = 0.02 ,_a : str = "linear" ,_a : Optional[Union[np.ndarray, List[float]]] = None ,_a : bool = True ,_a : bool = True ,_a : int = 0 ,_a : str = "epsilon" ,_a : float = 1.0 ,**_a : Union[str, Any] ,):
'''simple docstring'''
if kwargs.get('set_alpha_to_one' ,_a ) is not None:
_a : str = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' ,'1.0.0' ,_a ,standard_warn=_a )
_a : Union[str, Any] = kwargs['set_alpha_to_one']
if trained_betas is not None:
_a : int = torch.tensor(_a ,dtype=torch.floataa )
elif beta_schedule == "linear":
_a : List[str] = torch.linspace(_a ,_a ,_a ,dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_a : Union[str, Any] = (
torch.linspace(beta_start**0.5 ,beta_end**0.5 ,_a ,dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_a : Any = betas_for_alpha_bar(_a )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_a : List[Any] = 1.0 - self.betas
_a : Dict = torch.cumprod(self.alphas ,dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
_a : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
_a : Optional[int] = 1.0
# setable values
_a : int = None
_a : Any = torch.from_numpy(np.arange(0 ,_a ).copy().astype(np.intaa ) )
def __lowercase ( self : List[Any] ,_a : torch.FloatTensor ,_a : Optional[int] = None ):
'''simple docstring'''
return sample
def __lowercase ( self : List[str] ,_a : int ,_a : Union[str, torch.device] = None ):
'''simple docstring'''
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
F"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"""
F""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"""
F""" maximal {self.config.num_train_timesteps} timesteps.""" )
_a : str = num_inference_steps
_a : Optional[Any] = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_a : Any = (np.arange(0 ,_a ) * step_ratio).round().copy().astype(np.intaa )
_a : List[Any] = torch.from_numpy(_a ).to(_a )
self.timesteps += self.config.steps_offset
def __lowercase ( self : List[str] ,_a : torch.FloatTensor ,_a : int ,_a : torch.FloatTensor ,_a : float = 0.0 ,_a : bool = False ,_a : Optional[torch.FloatTensor] = None ,_a : bool = True ,):
'''simple docstring'''
_a : Union[str, Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
_a : List[str] = self.alphas_cumprod[timestep]
_a : List[str] = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
_a : Union[str, Any] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
_a : str = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
_a : List[str] = model_output
elif self.config.prediction_type == "sample":
_a : int = model_output
_a : List[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
_a : Any = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
_a : Optional[Any] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"""
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
_a : Optional[int] = pred_original_sample.clamp(
-self.config.clip_sample_range ,self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a : Any = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a : Dict = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=_a ,pred_original_sample=_a )
def __len__( self : List[str] ):
'''simple docstring'''
return self.config.num_train_timesteps
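# Hedged inversion-loop sketch (the scheduler above matches diffusers'
# DDIMInverseScheduler; `unet` and `latents` are assumed to come from a pipeline):
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample  # walks x_0 -> x_T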
| 271 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : int ,*_a : Optional[int] ,**_a : str ):
'''simple docstring'''
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
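# Hedged migration sketch, following the deprecation warning above (checkpoint name
# illustrative):
#   old: BeitFeatureExtractor.from_pretrained('microsoft/beit-base-patch16-224')
#   new: BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224')
# The two calls are interchangeable here, since the old class only forwards to the new one.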
| 271 | 1 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""artists_file""": """artists.json""",
"""lyrics_file""": """lyrics.json""",
"""genres_file""": """genres.json""",
}
__lowerCAmelCase = {
"""artists_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
},
"""genres_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
},
"""lyrics_file""": {
"""jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
},
}
__lowerCAmelCase = {
"""jukebox""": 5_1_2,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_LYRIC_TOKENS_SIZES
__UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__( self : str ,_a : Any ,_a : Dict ,_a : str ,_a : Any=["v3", "v2", "v2"] ,_a : Tuple=512 ,_a : int=5 ,_a : Any="<|endoftext|>" ,**_a : List[Any] ,):
'''simple docstring'''
_a : Tuple = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
super().__init__(
unk_token=_a ,n_genres=_a ,version=_a ,max_n_lyric_tokens=_a ,**_a ,)
_a : int = version
_a : Dict = max_n_lyric_tokens
_a : Dict = n_genres
with open(_a ,encoding='utf-8' ) as vocab_handle:
_a : List[Any] = json.load(_a )
with open(_a ,encoding='utf-8' ) as vocab_handle:
_a : str = json.load(_a )
with open(_a ,encoding='utf-8' ) as vocab_handle:
_a : Union[str, Any] = json.load(_a )
_a : Optional[int] = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the vocabulary had n_vocab=80 characters; v3 dropped the '+' character, so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
_a : Union[str, Any] = oov.replace(R'\-\'' ,R'\-+\'' )
_a : str = regex.compile(_a )
_a : Union[str, Any] = {v: k for k, v in self.artists_encoder.items()}
_a : str = {v: k for k, v in self.genres_encoder.items()}
_a : Dict = {v: k for k, v in self.lyrics_encoder.items()}
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def __lowercase ( self : Dict ):
'''simple docstring'''
return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder )
def __lowercase ( self : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = [self.artists_encoder.get(_a ,0 ) for artist in list_artists]
for genres in range(len(_a ) ):
_a : Dict = [self.genres_encoder.get(_a ,0 ) for genre in list_genres[genres]]
_a : Any = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
_a : Optional[Any] = [[self.lyrics_encoder.get(_a ,0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def __lowercase ( self : Optional[Any] ,_a : Union[str, Any] ):
'''simple docstring'''
return list(_a )
def __lowercase ( self : str ,_a : int ,_a : Tuple ,_a : int ,**_a : List[str] ):
'''simple docstring'''
_a, _a, _a : str = self.prepare_for_tokenization(_a ,_a ,_a )
_a : List[Any] = self._tokenize(_a )
return artist, genre, lyrics
def __lowercase ( self : Any ,_a : str ,_a : str ,_a : str ,_a : bool = False ):
'''simple docstring'''
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
_a : Tuple = artists[idx].lower()
_a : List[str] = [genres[idx].lower()]
else:
_a : List[Any] = self._normalize(artists[idx] ) + '.v2'
_a : Union[str, Any] = [
self._normalize(_a ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
_a : int = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
_a : List[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
_a : List[str] = {vocab[index]: index + 1 for index in range(len(_a ) )}
_a : str = 0
_a : Dict = len(_a ) + 1
_a : Any = self.vocab
_a : str = {v: k for k, v in self.vocab.items()}
_a : Dict = ''
else:
_a : str = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
_a : str = self._run_strip_accents(_a )
_a : List[Any] = lyrics.replace('\\' ,'\n' )
_a : List[str] = self.out_of_vocab.sub('' ,_a ), [], []
return artists, genres, lyrics
def __lowercase ( self : Any ,_a : List[str] ):
'''simple docstring'''
_a : Dict = unicodedata.normalize('NFD' ,_a )
_a : Union[str, Any] = []
for char in text:
_a : Optional[Any] = unicodedata.category(_a )
if cat == "Mn":
continue
output.append(_a )
return "".join(_a )
def __lowercase ( self : Any ,_a : str ):
'''simple docstring'''
_a : Optional[Any] = (
[chr(_a ) for i in range(ord('a' ) ,ord('z' ) + 1 )]
+ [chr(_a ) for i in range(ord('A' ) ,ord('Z' ) + 1 )]
+ [chr(_a ) for i in range(ord('0' ) ,ord('9' ) + 1 )]
+ ['.']
)
_a : Any = frozenset(_a )
_a : Optional[int] = re.compile(R'_+' )
_a : Tuple = ''.join([c if c in accepted else '_' for c in text.lower()] )
_a : Any = pattern.sub('_' ,_a ).strip('_' )
return text
def __lowercase ( self : Tuple ,_a : List[str] ):
'''simple docstring'''
return " ".join(_a )
def __lowercase ( self : str ,_a : Optional[int] ,_a : Optional[Union[str, TensorType]] = None ,_a : bool = False ):
'''simple docstring'''
if not isinstance(_a ,_a ):
_a : List[str] = TensorType(_a )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
_a : Dict = tf.constant
_a : str = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
_a : Tuple = torch.tensor
_a : List[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
_a : Optional[Any] = jnp.array
_a : Optional[Any] = _is_jax
else:
_a : Dict = np.asarray
_a : Optional[int] = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
_a : Union[str, Any] = [inputs]
if not is_tensor(_a ):
_a : str = as_tensor(_a )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : int ,_a : int ,_a : Optional[int] ,_a : Dict="" ,_a : Any="pt" ):
'''simple docstring'''
_a : Optional[Any] = [0, 0, 0]
_a : Dict = [artist] * len(self.version )
_a : List[Any] = [genres] * len(self.version )
_a, _a, _a : int = self.tokenize(_a ,_a ,_a )
_a, _a, _a : Dict = self._convert_token_to_id(_a ,_a ,_a )
_a : Optional[Any] = [-INFINITY] * len(full_tokens[-1] )
_a : List[Any] = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=_a )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def __lowercase ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Union[str, Any] = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_a ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder ,ensure_ascii=_a ) )
_a : List[str] = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_a ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder ,ensure_ascii=_a ) )
_a : List[str] = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_a ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=_a ) )
return (artists_file, genres_file, lyrics_file)
def __lowercase ( self : str ,_a : Dict ,_a : Dict ,_a : Optional[int] ):
'''simple docstring'''
_a : List[Any] = self.artists_decoder.get(_a )
_a : List[str] = [self.genres_decoder.get(_a ) for genre in genres_index]
_a : Union[str, Any] = [self.lyrics_decoder.get(_a ) for character in lyric_index]
return artist, genres, lyrics
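# Hedged usage sketch (the class above is transformers' JukeboxTokenizer; the
# checkpoint name is assumed from the pretrained maps near the top of the file):
#   tokenizer = JukeboxTokenizer.from_pretrained('ArthurZ/jukebox')
#   enc = tokenizer('Alan Jackson', 'Country Rock', 'old town road')
#   enc['input_ids']  # one tensor per prior, as assembled in __call__ above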
| 271 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Optional[int] ,_a : Optional[Any]=None ,_a : Dict=None ,*_a : int ,**_a : str ):
'''simple docstring'''
super().__init__(*_a ,**_a )
if config is None:
assert isinstance(self.model ,_a ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
_a : List[Any] = self.model.config
else:
_a : Optional[int] = config
_a : List[str] = data_args
_a : List[Any] = self.config.tgt_vocab_size if isinstance(self.config ,_a ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ' padding.' )
if self.args.label_smoothing == 0:
_a : List[str] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
_a : Tuple = label_smoothed_nll_loss
def __lowercase ( self : List[str] ,_a : int ):
'''simple docstring'''
if self.optimizer is None:
_a : Union[str, Any] = ['bias', 'LayerNorm.weight']
_a : Tuple = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
_a : Optional[int] = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
_a : Any = Adafactor
_a : Dict = {'scale_parameter': False, 'relative_step': False}
else:
_a : Union[str, Any] = AdamW
_a : str = {
'betas': (self.args.adam_betaa, self.args.adam_betaa),
'eps': self.args.adam_epsilon,
}
_a : Union[str, Any] = self.args.learning_rate
if self.sharded_ddp:
_a : str = OSS(
params=_a ,optim=_a ,**_a ,)
else:
_a : Tuple = optimizer_cls(_a ,**_a )
if self.lr_scheduler is None:
_a : List[Any] = self._get_lr_scheduler(_a )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def __lowercase ( self : List[Any] ,_a : List[Any] ):
'''simple docstring'''
_a : str = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
_a : int = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
_a : List[str] = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
_a : Optional[int] = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=_a )
return scheduler
def __lowercase ( self : Tuple ):
'''simple docstring'''
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __lowercase ( self : Dict ,_a : Dict ,_a : Any ,_a : Dict ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
_a : List[Any] = model(**_a ,use_cache=_a )[0]
_a : Union[str, Any] = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
_a, _a : Union[str, Any] = model(**_a ,labels=_a ,use_cache=_a )[:2]
else:
# compute label smoothed loss
_a : List[Any] = model(**_a ,use_cache=_a )[0]
_a : Any = torch.nn.functional.log_softmax(_a ,dim=-1 )
_a, _a : List[str] = self.loss_fn(_a ,_a ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def __lowercase ( self : Optional[int] ,_a : Union[str, Any] ,_a : List[Any] ):
'''simple docstring'''
_a : Optional[int] = inputs.pop('labels' )
_a, _a : int = self._compute_loss(_a ,_a ,_a )
return loss
def __lowercase ( self : Optional[Any] ,_a : nn.Module ,_a : Dict[str, Union[torch.Tensor, Any]] ,_a : bool ,_a : Optional[List[str]] = None ,):
'''simple docstring'''
_a : int = self._prepare_inputs(_a )
_a : Any = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
_a : int = self.model.generate(
inputs['input_ids'] ,attention_mask=inputs['attention_mask'] ,**_a ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
_a : int = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
_a : Union[str, Any] = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
_a, _a : Optional[int] = self._compute_loss(_a ,_a ,_a )
_a : Optional[Any] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
_a : Optional[Any] = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
_a : Dict = self._pad_tensors_to_max_len(_a ,gen_kwargs['max_length'] )
return (loss, logits, labels)
def __lowercase ( self : str ,_a : Tuple ,_a : Tuple ):
'''simple docstring'''
_a : List[Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F""" padded to `max_length`={max_length}""" )
_a : int = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
_a : Union[str, Any] = tensor
return padded_tensor
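# Hedged sketch: the trainer above (Seq2SeqTrainer in the upstream seq2seq examples)
# is driven like the base Trainer, plus the config/data_args hooks shown in __init__:
#   trainer = Seq2SeqTrainer(model=model, args=training_args, data_args=data_args,
#                            train_dataset=train_ds, eval_dataset=eval_ds)
#   trainer.train()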
| 271 | 1 |
'''simple docstring'''
def UpperCAmelCase_ (__a : int = 1_0_0 ):
"""simple docstring"""
_a : List[Any] = n * (n + 1) * (2 * n + 1) / 6
_a : int = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
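# Quick sanity check for n = 10: sum of squares = 10 * 11 * 21 / 6 = 385,
# square of sum = 55**2 = 3025, so the difference is 3025 - 385 = 2640
# (the worked example from Project Euler problem 6).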
| 271 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCAmelCase = re.compile(r"""\s+""")
def UpperCAmelCase_ (__a : Any ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__a , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : List[str] = [len(__a ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(__a ), "line_max": max(__a )}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Union[str, Any] = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def UpperCAmelCase_ (__a : Optional[int] , __a : Any ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def UpperCAmelCase_ (__a : int , __a : Union[str, Any]=5 ):
"""simple docstring"""
_a : Optional[int] = ['auto-generated', 'autogenerated', 'automatically generated']
_a : List[str] = example['content'].splitlines()
for _, line in zip(range(__a ) , __a ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCAmelCase_ (__a : List[str] , __a : Dict=5 , __a : Tuple=0.05 ):
"""simple docstring"""
_a : Optional[int] = ['unit tests', 'test file', 'configuration file']
_a : int = example['content'].splitlines()
_a : int = 0
_a : Dict = 0
# first test
for _, line in zip(range(__a ) , __a ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_a : int = example['content'].count('\n' )
_a : int = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCAmelCase_ (__a : Optional[int] ):
"""simple docstring"""
_a : List[str] = ['def ', 'class ', 'for ', 'while ']
_a : str = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCAmelCase_ (__a : int , __a : Any=4 ):
"""simple docstring"""
_a : List[str] = example['content'].splitlines()
_a : Dict = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Optional[Any] = tokenizer(example['content'] , truncation=__a )['input_ids']
_a : Optional[int] = len(example['content'] ) / len(__a )
return {"ratio": ratio}
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
_a : Dict = {}
results.update(get_hash(__a ) )
results.update(line_stats(__a ) )
results.update(alpha_stats(__a ) )
results.update(char_token_ratio(__a ) )
results.update(is_autogenerated(__a ) )
results.update(is_config_or_test(__a ) )
results.update(has_no_keywords(__a ) )
results.update(has_few_assignments(__a ) )
return results
def UpperCAmelCase_ (__a : Any , __a : Any , __a : str ):
"""simple docstring"""
if not check_uniques(__a , __a ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def UpperCAmelCase_ (__a : Union[str, Any] ):
"""simple docstring"""
with open(__a , 'rb' ) as f_in:
with gzip.open(str(__a ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(__a , __a )
os.unlink(__a )
# Settings
__lowerCAmelCase = HfArgumentParser(PreprocessingArguments)
__lowerCAmelCase = parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase = multiprocessing.cpu_count()
__lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCAmelCase = time.time()
__lowerCAmelCase = load_dataset(args.dataset_name, split="""train""")
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCAmelCase = time.time()
__lowerCAmelCase = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCAmelCase = set(ds.unique("""hash"""))
__lowerCAmelCase = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCAmelCase = time.time()
__lowerCAmelCase = ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCAmelCase = time.time()
__lowerCAmelCase , __lowerCAmelCase = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCAmelCase = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
__lowerCAmelCase = output_dir / """data"""
data_dir.mkdir(exist_ok=True)
__lowerCAmelCase = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCAmelCase = str(data_dir / f'''file-{file_number+1:012}.json''')
__lowerCAmelCase = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
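# Hand-checked toy run of two of the heuristics above:
#   example = {"content": "def foo():\n    return 1\n"}
#   has_no_keywords(example)      # {"has_no_keywords": False} -- 'def ' is present
#   has_few_assignments(example)  # {"has_few_assignments": True} -- no '=' at all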
| 271 | 1 |
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__lowerCAmelCase = get_tests_dir() + """/test_data/fsmt/fsmt_val_data.json"""
with io.open(filename, """r""", encoding="""utf-8""") as f:
__lowerCAmelCase = json.load(f)
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : List[str] ,_a : Dict ):
'''simple docstring'''
return FSMTTokenizer.from_pretrained(_a )
def __lowercase ( self : int ,_a : List[str] ):
'''simple docstring'''
_a : List[str] = FSMTForConditionalGeneration.from_pretrained(_a ).to(_a )
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
def __lowercase ( self : int ,_a : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
_a : int = F"""facebook/wmt19-{pair}"""
_a : Any = self.get_tokenizer(_a )
_a : Any = self.get_model(_a )
_a : Union[str, Any] = bleu_data[pair]['src']
_a : Optional[int] = bleu_data[pair]['tgt']
_a : Any = tokenizer(_a ,return_tensors='pt' ,truncation=_a ,padding='longest' ).to(_a )
_a : int = model.generate(
input_ids=batch.input_ids ,num_beams=8 ,)
_a : Optional[int] = tokenizer.batch_decode(
_a ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a )
_a : Optional[Any] = calculate_bleu(_a ,_a )
print(_a )
self.assertGreaterEqual(scores['bleu'] ,_a )
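# Standalone hedged sketch of the translation path exercised by the test above:
#   tokenizer = FSMTTokenizer.from_pretrained('facebook/wmt19-en-ru')
#   model = FSMTForConditionalGeneration.from_pretrained('facebook/wmt19-en-ru')
#   batch = tokenizer(['Machine learning is great'], return_tensors='pt')
#   out = model.generate(input_ids=batch.input_ids, num_beams=8)
#   tokenizer.batch_decode(out, skip_special_tokens=True)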
| 271 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCAmelCase = 1_6
__lowerCAmelCase = 3_2
def UpperCAmelCase_ (__a : Accelerator , __a : DatasetDict , __a : List[int] , __a : List[int] , __a : int = 1_6 ):
"""simple docstring"""
_a : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' )
_a : str = DatasetDict(
{
'train': dataset['train'].select(__a ),
'validation': dataset['train'].select(__a ),
'test': dataset['validation'],
} )
def tokenize_function(__a : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
_a : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_a : List[str] = datasets.map(
__a , batched=__a , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_a : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(__a : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_a : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_a : Tuple = 1_6
elif accelerator.mixed_precision != "no":
_a : List[Any] = 8
else:
_a : List[Any] = None
return tokenizer.pad(
__a , padding='longest' , max_length=__a , pad_to_multiple_of=__a , return_tensors='pt' , )
# Instantiate dataloaders.
_a : Any = DataLoader(
tokenized_datasets['train'] , shuffle=__a , collate_fn=__a , batch_size=__a )
_a : Optional[int] = DataLoader(
tokenized_datasets['validation'] , shuffle=__a , collate_fn=__a , batch_size=__a )
_a : Optional[Any] = DataLoader(
tokenized_datasets['test'] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader, test_dataloader
def UpperCAmelCase_ (__a : Any , __a : Union[str, Any] ):
"""simple docstring"""
_a : Dict = []
# Download the dataset
_a : Tuple = load_dataset('glue' , 'mrpc' )
# Create our splits
_a : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_a : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_a : Optional[Any] = config['lr']
_a : Optional[int] = int(config['num_epochs'] )
_a : Dict = int(config['seed'] )
_a : Dict = int(config['batch_size'] )
_a : Optional[int] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
_a : List[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_a : Any = batch_size // MAX_GPU_BATCH_SIZE
_a : List[str] = MAX_GPU_BATCH_SIZE
set_seed(__a )
# New Code #
# Create our folds:
_a : int = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
_a : Any = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__a ):
_a, _a, _a : Optional[Any] = get_fold_dataloaders(
__a , __a , __a , __a , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_a : Dict = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__a )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_a : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_a : List[str] = AdamW(params=model.parameters() , lr=__a )
# Instantiate scheduler
_a : List[Any] = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=1_0_0 , num_training_steps=(len(__a ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_a, _a, _a, _a, _a : Union[str, Any] = accelerator.prepare(
__a , __a , __a , __a , __a )
# Now we train the model
for epoch in range(__a ):
model.train()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_a : Dict = model(**__a )
_a : int = outputs.loss
_a : Any = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Union[str, Any] = model(**__a )
_a : Tuple = outputs.logits.argmax(dim=-1 )
_a, _a : Any = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=__a , references=__a , )
_a : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __a )
# New Code #
# We also run predictions on the test set at the very end
_a : Any = []
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_a : Tuple = model(**__a )
_a : Dict = outputs.logits
_a, _a : Optional[int] = accelerator.gather_for_metrics((predictions, batch['labels']) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
        # Collect this fold's test logits so they can be ensembled across folds below.
test_predictions.append(torch.cat(__a , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_a : Dict = torch.cat(__a , dim=0 )
_a : Any = torch.stack(__a , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_a : str = metric.compute(predictions=__a , references=__a )
accelerator.print('Average test metrics from all folds:' , __a )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision' , type=__a , default=__a , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
# New Code #
parser.add_argument('--num_folds' , type=__a , default=3 , help='The number of splits to perform across the dataset' )
_a : Any = parser.parse_args()
_a : int = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
training_function(__a , __a )
if __name__ == "__main__":
main()
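# Hedged launch sketch (script name hypothetical; the flags come from the parser
# above, and `accelerate launch` is the standard entry point for these examples):
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16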
| 271 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return choice(__a )
def UpperCAmelCase_ (__a : list[int] , __a : int ):
"""simple docstring"""
_a : Dict = random_pivot(__a )
# partition based on pivot
# linear time
_a : Optional[int] = [e for e in lst if e < pivot]
_a : List[str] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__a ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__a ) < k - 1:
return kth_number(__a , k - len(__a ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__a , __a )
if __name__ == "__main__":
import doctest
doctest.testmod()
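# Worked example (distinct values only -- the partition above silently drops
# duplicates of the pivot): kth_number([3, 1, 4, 5, 9, 2, 6], 3) == 3, because the
# sorted order is [1, 2, 3, 4, 5, 6, 9] and the 3rd smallest element is 3.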
| 271 |
'''simple docstring'''
from __future__ import annotations
__lowerCAmelCase = [-1_0, -5, 0, 5, 5.1, 1_1, 1_3, 2_1, 3, 4, -2_1, -1_0, -5, -1, 0]
__lowerCAmelCase = [-5, 0, 5, 5.1, 1_1, 1_3, 2_1, -1, 4, -1, -1_0, -5, -1, 0, -1]
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Optional[int] = []
_a : int = len(__a )
for i in range(__a ):
_a : float = -1
for j in range(i + 1 , __a ):
if arr[i] < arr[j]:
_a : Any = arr[j]
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : Tuple = []
for i, outer in enumerate(__a ):
_a : float = -1
for inner in arr[i + 1 :]:
if outer < inner:
_a : Dict = inner
break
result.append(__a )
return result
def UpperCAmelCase_ (__a : list[float] ):
"""simple docstring"""
_a : int = len(__a )
_a : list[float] = []
_a : list[float] = [-1] * arr_size
for index in reversed(range(__a ) ):
if stack:
while stack[-1] <= arr[index]:
stack.pop()
if not stack:
break
if stack:
_a : Dict = stack[-1]
stack.append(arr[index] )
return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
__lowerCAmelCase = (
"""from __main__ import arr, next_greatest_element_slow, """
"""next_greatest_element_fast, next_greatest_element"""
)
print(
"""next_greatest_element_slow():""",
timeit("""next_greatest_element_slow(arr)""", setup=setup),
)
print(
"""next_greatest_element_fast():""",
timeit("""next_greatest_element_fast(arr)""", setup=setup),
)
print(
""" next_greatest_element():""",
timeit("""next_greatest_element(arr)""", setup=setup),
)
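# Worked example for all three variants above: [2, 7, 3, 5, 1] maps to
# [7, -1, 5, -1, -1] -- each element is replaced by the first strictly greater value
# to its right, or -1 when none exists.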
| 271 | 1 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase : int = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def __lowercase ( self : Any ,_a : Any ,_a : List[str] ,_a : str ):
'''simple docstring'''
_a : Optional[Any] = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def __lowercase ( self : int ,_a : int ,_a : Optional[Any] ):
'''simple docstring'''
_a : Dict = generator('Something there' )
self.assertEqual(_a ,[{'generated_text': ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_a : List[str] = generator(['This is great !', 'Something else'] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{'generated_text': ANY(_a )}, {'generated_text': ANY(_a )}],
[{'generated_text': ANY(_a )}, {'generated_text': ANY(_a )}],
] ,)
_a : Dict = generator(
['This is great !', 'Something else'] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{'generated_text': ANY(_a )}, {'generated_text': ANY(_a )}],
[{'generated_text': ANY(_a )}, {'generated_text': ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Dict = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='pt' )
# do_sample=False necessary for reproducibility
_a : int = generator('Something there' ,do_sample=_a )
self.assertEqual(_a ,[{'generated_text': ''}] )
_a : List[str] = 3
_a : int = generator(
'Something there' ,num_return_sequences=_a ,num_beams=_a ,)
_a : Optional[int] = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(_a ,_a )
_a : Any = generator('This is a test' ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] ,)
_a : List[Any] = generator.model.config.eos_token_id
_a : Optional[Any] = '<pad>'
_a : List[Any] = generator(
['This is a test', 'This is a second test'] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] ,)
@require_tf
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a : Optional[int] = pipeline('text2text-generation' ,model='patrickvonplaten/t5-tiny-random' ,framework='tf' )
# do_sample=False necessary for reproducibility
_a : str = generator('Something there' ,do_sample=_a )
self.assertEqual(_a ,[{'generated_text': ''}] )
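# Standalone hedged sketch of the pipeline under test (model and output illustrative):
#   generator = pipeline('text2text-generation', model='t5-small')
#   generator('translate English to German: How old are you?')
#   # [{'generated_text': 'Wie alt sind Sie?'}]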
| 271 |
'''simple docstring'''
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
__lowerCAmelCase = HUGGINGFACE_HUB_CACHE
__lowerCAmelCase = """config.json"""
__lowerCAmelCase = """diffusion_pytorch_model.bin"""
__lowerCAmelCase = """diffusion_flax_model.msgpack"""
__lowerCAmelCase = """model.onnx"""
__lowerCAmelCase = """diffusion_pytorch_model.safetensors"""
__lowerCAmelCase = """weights.pb"""
__lowerCAmelCase = """https://huggingface.co"""
__lowerCAmelCase = default_cache_path
__lowerCAmelCase = """diffusers_modules"""
__lowerCAmelCase = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
__lowerCAmelCase = ["""fp16""", """non-ema"""]
__lowerCAmelCase = """.self_attn"""
| 271 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,encoding='utf-8' ,check=_a ,)
assert hasattr(self ,'env' )
def __lowercase ( self : int ,_a : int ):
'''simple docstring'''
_a : Dict = {
'enabled': True,
'processes_per_host': 8,
}
_a : Union[str, Any] = {
'enabled': True,
'parameters': {
'microbatches': 4,
'placement_strategy': 'spread',
'pipeline': 'interleaved',
'optimize': 'speed',
'partitions': 4,
'ddp': True,
},
}
_a : List[str] = {'smdistributed': {'modelparallel': smp_options}, 'mpi': mpi_options}
_a : List[str] = 'trainer' if self.script == 'run_glue.py' else 'smtrainer'
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,instance_count=_a ,instance_type=self.instance_type ,debugger_hook_config=_a ,hyperparameters={
**self.env.hyperparameters,
'model_name_or_path': self.model_name_or_path,
'max_steps': 500,
} ,metric_definitions=self.env.metric_definitions ,distribution=_a ,py_version='py36' ,)
def __lowercase ( self : int ,_a : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(_a ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __lowercase ( self : Union[str, Any] ,_a : List[str] ):
'''simple docstring'''
_a : str = self.create_estimator(_a )
# run training
estimator.fit()
# result dataframe
_a : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_a : Dict = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
_a : List[str] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_a : List[str] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,_a )
| 271 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : int ,_a : Any ,_a : Optional[int]=2 ,_a : Optional[Any]=True ,_a : Dict=False ,_a : Dict=10 ,_a : Any=3 ,_a : str=32 * 8 ,_a : Optional[int]=32 * 8 ,_a : int=4 ,_a : str=64 ,):
'''simple docstring'''
_a : Dict = parent
_a : Union[str, Any] = batch_size
_a : Tuple = is_training
_a : List[str] = use_auxiliary_loss
_a : Optional[Any] = num_queries
_a : str = num_channels
_a : List[str] = min_size
_a : int = max_size
_a : Optional[int] = num_labels
_a : List[str] = hidden_dim
_a : int = hidden_dim
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
_a : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a )
_a : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5
).float()
_a : Tuple = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long()
_a : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : int = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
_a : str = self.num_queries
_a : Union[str, Any] = self.num_labels
_a : Tuple = [1, 1, 1, 1]
_a : Dict = self.num_channels
_a : str = 64
_a : Tuple = 128
_a : Optional[Any] = self.hidden_dim
_a : Union[str, Any] = self.hidden_dim
_a : List[Any] = self.hidden_dim
return config
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a, _a, _a, _a, _a : Optional[Any] = self.prepare_config_and_inputs()
_a : str = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def __lowercase ( self : List[str] ,_a : Optional[Any] ,_a : str ):
'''simple docstring'''
_a : str = output.encoder_hidden_states
_a : Any = output.pixel_decoder_hidden_states
_a : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,config.decoder_layers )
def __lowercase ( self : List[str] ,_a : str ,_a : List[Any] ,_a : Any ,_a : Union[str, Any]=False ):
'''simple docstring'''
with torch.no_grad():
_a : str = MaskaFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[Any] = model(_a ,output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a ,_a )
def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Union[str, Any] ,_a : Tuple ,_a : List[str] ,_a : Any ):
'''simple docstring'''
_a : int = MaskaFormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a : Any ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a spatial size reduced by a factor of 4 (//4)
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_a : Any = model(pixel_values=_a ,pixel_mask=_a )
_a : Optional[int] = model(_a )
comm_check_on_output(_a )
_a : List[str] = model(
pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class UpperCAmelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__UpperCAmelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Union[str, Any] = MaskaFormerModelTester(self )
_a : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a, _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_a )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def __lowercase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __lowercase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(_a )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[Any] = [*signature.parameters.keys()]
_a : List[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_a )
@slow
def __lowercase ( self : List[str] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_a : Dict = MaskaFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
_a : int = (self.model_tester.min_size,) * 2
_a : Any = {
'pixel_values': torch.randn((2, 3, *size) ,device=_a ),
'mask_labels': torch.randn((2, 10, *size) ,device=_a ),
'class_labels': torch.zeros(2 ,10 ,device=_a ).long(),
}
_a : List[Any] = self.model_tester.get_config()
_a : int = MaskaFormerForUniversalSegmentation(_a ).to(_a )
_a : str = model(**_a )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : List[str] ):
'''simple docstring'''
_a, _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_a ,**_a ,output_hidden_states=_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a, _a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a ).to(_a )
_a : Optional[int] = model(**_a ,output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Tuple ):
'''simple docstring'''
if not self.model_tester.is_training:
return
_a : List[str] = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[str] = self.model_tester.prepare_config_and_inputs()
_a : Any = model_class(_a )
model.to(_a )
model.train()
_a : Union[str, Any] = model(_a ,mask_labels=_a ,class_labels=_a ).loss
loss.backward()
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.all_model_classes[1]
_a, _a, _a, _a, _a : List[Any] = self.model_tester.prepare_config_and_inputs()
_a : str = True
_a : str = True
_a : List[str] = model_class(_a ).to(_a )
model.train()
_a : Optional[int] = model(_a ,mask_labels=_a ,class_labels=_a )
_a : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_a : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_a : Dict = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_a : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1e-4
def UpperCAmelCase_ ():
"""simple docstring"""
_a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowercase ( self : Any ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[str] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_a )
_a : int = self.default_image_processor
_a : Tuple = prepare_img()
_a : Any = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[Any] = model(**_a )
_a : List[Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : str = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
_a : Any = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Optional[Any] = self.default_image_processor
_a : List[Any] = prepare_img()
_a : str = image_processor(_a ,return_tensors='pt' ).to(_a )
_a : Any = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
_a : Optional[int] = model(**_a )
# masks_queries_logits
_a : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_a : Dict = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
_a : Optional[Any] = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) )
# class_queries_logits
_a : str = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
_a : str = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
_a : Tuple = self.default_image_processor
_a : Tuple = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
_a : str = inputs['pixel_values'].to(_a )
_a : str = [el.to(_a ) for el in inputs['mask_labels']]
_a : Dict = [el.to(_a ) for el in inputs['class_labels']]
with torch.no_grad():
_a : List[str] = model(**_a )
self.assertTrue(outputs.loss is not None )
| 271 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
__lowerCAmelCase = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def UpperCAmelCase_ (__a : int ):
"""simple docstring"""
if not isinstance(__a , __a ):
raise ValueError('n must be an integer' )
if n <= 0:
raise ValueError('n must be >= 0' )
_a : Dict = []
for num in range(len(__a ) ):
_a : List[Any] = 0
while 2 * i * i <= odd_composites[num]:
_a : Any = odd_composites[num] - 2 * i * i
if is_prime(__a ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__a ) == n:
return list_nums
return []
def UpperCAmelCase_ ():
"""simple docstring"""
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 271 |
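The row above solves Project Euler 46 (Goldbach's other conjecture): find the smallest odd composite that cannot be written as a prime plus twice a square. A quick independent sketch that re-derives the same answer by brute force; `first_counterexample` is an illustrative helper, not part of the dataset row:

import math

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    for p in range(2, int(math.isqrt(n)) + 1):
        if n % p == 0:
            return False
    return True

def first_counterexample() -> int:
    n = 9
    while True:
        if not is_prime(n):
            # try every decomposition n = prime + 2*i*i
            if not any(is_prime(n - 2 * i * i) for i in range(1, math.isqrt(n // 2) + 1)):
                return n
        n += 2

print(first_counterexample())  # expected: 5777, the well-known Euler 46 answer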
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCAmelCase_ (__a : List[Any] ):
"""simple docstring"""
if (
(cp >= 0x4E_00 and cp <= 0x9F_FF)
or (cp >= 0x34_00 and cp <= 0x4D_BF) #
or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) #
or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) #
or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) #
or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) #
or (cp >= 0xF9_00 and cp <= 0xFA_FF)
or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) #
): #
return True
return False
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
for char in word:
_a : Union[str, Any] = ord(__a )
if not _is_chinese_char(__a ):
return 0
return 1
def UpperCAmelCase_ (__a : List[str] ):
"""simple docstring"""
_a : Dict = set()
for token in tokens:
_a : str = len(__a ) > 1 and is_chinese(__a )
if chinese_word:
word_set.add(__a )
_a : Optional[Any] = list(__a )
return word_list
def UpperCAmelCase_ (__a : List[str] , __a : set() ):
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_a : Optional[Any] = max([len(__a ) for w in chinese_word_set] )
_a : Optional[int] = bert_tokens
_a, _a : Any = 0, len(__a )
while start < end:
_a : Tuple = True
if is_chinese(bert_word[start] ):
_a : Union[str, Any] = min(end - start , __a )
for i in range(__a , 1 , -1 ):
_a : Optional[Any] = ''.join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_a : Any = '##' + bert_word[j]
_a : Union[str, Any] = start + i
_a : int = False
break
if single_word:
start += 1
return bert_word
def UpperCAmelCase_ (__a : List[str] , __a : LTP , __a : BertTokenizer ):
"""simple docstring"""
_a : int = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : Union[str, Any] = ltp_tokenizer.seg(lines[i : i + 1_0_0] )[0]
_a : Optional[Any] = [get_chinese_word(__a ) for r in res]
ltp_res.extend(__a )
assert len(__a ) == len(__a )
_a : str = []
for i in range(0 , len(__a ) , 1_0_0 ):
_a : List[str] = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=__a , truncation=__a , max_length=5_1_2 )
bert_res.extend(res['input_ids'] )
assert len(__a ) == len(__a )
_a : List[str] = []
for input_ids, chinese_word in zip(__a , __a ):
_a : int = []
for id in input_ids:
_a : Optional[int] = bert_tokenizer._convert_id_to_token(__a )
input_tokens.append(__a )
_a : List[str] = add_sub_symbol(__a , __a )
_a : Tuple = []
# We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(__a ):
if token[:2] == "##":
_a : str = token[2:]
# save chinese tokens' pos
if len(__a ) == 1 and _is_chinese_char(ord(__a ) ):
ref_id.append(__a )
ref_ids.append(__a )
assert len(__a ) == len(__a )
return ref_ids
def UpperCAmelCase_ (__a : Optional[Any] ):
"""simple docstring"""
with open(args.file_name , 'r' , encoding='utf-8' ) as f:
_a : Dict = f.readlines()
_a : int = [line.strip() for line in data if len(__a ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_a : int = LTP(args.ltp ) # faster in GPU device
_a : Tuple = BertTokenizer.from_pretrained(args.bert )
_a : int = prepare_ref(__a , __a , __a )
with open(args.save_path , 'w' , encoding='utf-8' ) as f:
_a : Optional[Any] = [json.dumps(__a ) + '\n' for ref in ref_ids]
f.writelines(__a )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
__lowerCAmelCase = parser.parse_args()
main(args)
| 271 | 1 |
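The script above re-marks Chinese subwords with '##' so a whole-word-mask collator can mask complete words; the greedy longest-match scan in add_sub_symbol does the heavy lifting. A toy, self-contained sketch of that idea (the word set and tokens are invented for illustration):

# minimal sketch of the '##' re-marking step over a toy Chinese word set
def mark_subwords(bert_tokens: list, chinese_words: set) -> list:
    if not chinese_words:
        return list(bert_tokens)
    tokens = list(bert_tokens)
    max_len = max(len(w) for w in chinese_words)
    start = 0
    while start < len(tokens):
        matched = False
        # greedily try the longest candidate word first
        for i in range(min(len(tokens) - start, max_len), 1, -1):
            if ''.join(tokens[start:start + i]) in chinese_words:
                for j in range(start + 1, start + i):
                    tokens[j] = '##' + tokens[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return tokens

print(mark_subwords(['北', '京', '很', '大'], {'北京'}))  # ['北', '##京', '很', '大']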
'''simple docstring'''
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict ):
'''simple docstring'''
_a : Dict = {}
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(_a ,' -> ' ,' -> '.join([str(_a ) for j in self.vertex[i]] ) )
def __lowercase ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_a )
else:
# else make a new vertex
_a : int = [to_vertex]
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_a ,_a )
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : list ):
'''simple docstring'''
_a : List[Any] = True
print(_a ,end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_a ,_a )
if __name__ == "__main__":
__lowerCAmelCase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 271 |
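A side note on the row above: the recursive DFS indexes its visited list by vertex label, so it assumes vertices are numbered 0..n-1, and deep graphs can hit Python's recursion limit. An iterative sketch with an explicit stack and a visited set avoids both (a variant, not the dataset row itself):

# iterative DFS sketch over the same adjacency-dict representation
def dfs_iterative(vertex: dict, start: int) -> list:
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbors in reverse so they are visited in insertion order
        for neighbor in reversed(vertex.get(node, [])):
            if neighbor not in visited:
                stack.append(neighbor)
    return order

graph = {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}
print(dfs_iterative(graph, 0))  # [0, 1, 2, 3]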
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Tuple ,*_a : List[str] ,**_a : Any ):
'''simple docstring'''
warnings.warn(
'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use VideoMAEImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
| 271 | 1 |
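Since the deprecation shim above only warns and delegates, migrating is a one-line change. A hedged usage sketch of the replacement class (the checkpoint name 'MCG-NJU/videomae-base' is illustrative):

from transformers import VideoMAEImageProcessor

# use the replacement class directly; the checkpoint name is illustrative
processor = VideoMAEImageProcessor.from_pretrained('MCG-NJU/videomae-base')
# a video is passed as a list of frames, e.g. numpy arrays of shape (H, W, 3):
# inputs = processor(video, return_tensors='pt')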
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
__lowerCAmelCase = """sshleifer/bart-tiny-random"""
__lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return AutoConfig.from_pretrained(_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a, *_a : int = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.num_hidden_layers ,1 )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a, *_a : Optional[int] = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a, *_a : Tuple = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=_a )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,self.teacher_config.encoder_layers )
def __lowercase ( self : str ):
'''simple docstring'''
_a, *_a : List[Any] = create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=1 ,d=1 )
self.assertEqual(student.config.encoder_layers ,1 )
self.assertEqual(student.config.decoder_layers ,1 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
create_student_by_copying_alternating_layers(_a ,tempfile.mkdtemp() ,e=_a ,d=_a )
| 271 |
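The tests above exercise create_student_by_copying_alternating_layers, which builds a shallower student by copying a subset of the teacher's layers. A loose sketch of the underlying idea; copy_spaced_layers is an invented helper, not the actual make_student implementation:

import torch.nn as nn

# illustrative: pick evenly spaced teacher layers for an n-layer student
def copy_spaced_layers(teacher_layers: nn.ModuleList, n_student: int) -> nn.ModuleList:
    n_teacher = len(teacher_layers)
    # e.g. 12 teacher layers -> 3 student layers picks indices 0, 6, 11
    indices = [round(i * (n_teacher - 1) / max(1, n_student - 1)) for i in range(n_student)]
    return nn.ModuleList(teacher_layers[i] for i in indices)

teacher = nn.ModuleList(nn.Linear(4, 4) for _ in range(12))
print(len(copy_spaced_layers(teacher, 3)))  # 3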
'''simple docstring'''
from __future__ import annotations
from random import choice
def UpperCAmelCase_ (__a : str ):
"""simple docstring"""
return choice(__a )
def UpperCAmelCase_ (__a : list[int] , __a : int ):
"""simple docstring"""
_a : Dict = random_pivot(__a )
# partition based on pivot
# linear time
_a : Optional[int] = [e for e in lst if e < pivot]
_a : List[str] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(__a ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(__a ) < k - 1:
return kth_number(__a , k - len(__a ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(__a , __a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 271 | 1 |
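One caveat about the row above: the partition keeps only elements strictly smaller or strictly larger than the pivot, so duplicates of the pivot are silently dropped and the result can be wrong on lists with repeated values (e.g. kth_number([2, 2, 3], 2) can return 3). A sketch of a duplicate-safe variant:

from random import choice

# quickselect sketch that accounts for values equal to the pivot
def kth_number_safe(lst: list, k: int) -> int:
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]
    equal = [e for e in lst if e == pivot]
    big = [e for e in lst if e > pivot]
    if k <= len(small):
        return kth_number_safe(small, k)
    if k <= len(small) + len(equal):
        return pivot
    return kth_number_safe(big, k - len(small) - len(equal))

print(kth_number_safe([2, 2, 3], 2))  # 2
print(kth_number_safe([3, 1, 3, 2, 3], 3))  # 3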
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[torch.FloatTensor] = None
__UpperCAmelCase : torch.FloatTensor = None
__UpperCAmelCase : Optional[Tuple[torch.FloatTensor]] = None
__UpperCAmelCase : Optional[Tuple[torch.FloatTensor]] = None
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[str] ,_a : Union[str, Any]=1 ,_a : List[Any]=0 ,_a : List[Any]=2 ,_a : str=512 ,_a : Optional[int]="cls" ,_a : Union[str, Any]=False ,_a : Optional[int]=True ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
_a : Dict = project_dim
_a : int = pooler_fn
_a : Tuple = learn_encoder
_a : Optional[int] = use_attention_mask
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = [R'''pooler''', R'''logit_scale''']
__UpperCAmelCase : int = [R'''position_ids''', R'''predictions.decoder.bias''']
__UpperCAmelCase : Dict = '''roberta'''
__UpperCAmelCase : str = RobertaSeriesConfig
def __init__( self : Tuple ,_a : List[Any] ):
'''simple docstring'''
super().__init__(_a )
_a : Union[str, Any] = XLMRobertaModel(_a )
_a : Tuple = nn.Linear(config.hidden_size ,config.project_dim )
_a : str = getattr(_a ,'has_pre_transformation' ,_a )
if self.has_pre_transformation:
_a : int = nn.Linear(config.hidden_size ,config.project_dim )
_a : int = nn.LayerNorm(config.hidden_size ,eps=config.layer_norm_eps )
self.post_init()
def __lowercase ( self : Union[str, Any] ,_a : Optional[torch.Tensor] = None ,_a : Optional[torch.Tensor] = None ,_a : Optional[torch.Tensor] = None ,_a : Optional[torch.Tensor] = None ,_a : Optional[torch.Tensor] = None ,_a : Optional[torch.Tensor] = None ,_a : Optional[torch.Tensor] = None ,_a : Optional[torch.Tensor] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
_a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_a : str = self.base_model(
input_ids=_a ,attention_mask=_a ,token_type_ids=_a ,position_ids=_a ,head_mask=_a ,inputs_embeds=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_attentions=_a ,output_hidden_states=True if self.has_pre_transformation else output_hidden_states ,return_dict=_a ,)
if self.has_pre_transformation:
_a : Optional[Any] = outputs['hidden_states'][-2]
_a : str = self.pre_LN(_a )
_a : Dict = self.transformation_pre(_a )
return TransformationModelOutput(
projection_state=_a ,last_hidden_state=outputs.last_hidden_state ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
else:
_a : List[str] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=_a ,last_hidden_state=outputs.last_hidden_state ,hidden_states=outputs.hidden_states ,attentions=outputs.attentions ,)
| 271 |
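When has_pre_transformation is set, the model above pools from the penultimate hidden state (hidden_states[-2]), layer-norms it, and projects it to project_dim. A tiny sketch of just that step, with made-up dimensions:

import torch
from torch import nn

# illustrative: project the penultimate layer's hidden states (dims are made up)
hidden_size, project_dim = 8, 4
hidden_states = [torch.randn(2, 5, hidden_size) for _ in range(13)]  # embeddings + 12 layers
pre_ln = nn.LayerNorm(hidden_size)
transformation_pre = nn.Linear(hidden_size, project_dim)

projection_state = transformation_pre(pre_ln(hidden_states[-2]))
print(projection_state.shape)  # torch.Size([2, 5, 4])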
'''simple docstring'''
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : Dict ):
'''simple docstring'''
_a : Dict = {}
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(_a ,' -> ' ,' -> '.join([str(_a ) for j in self.vertex[i]] ) )
def __lowercase ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_a )
else:
# else make a new vertex
_a : int = [to_vertex]
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Tuple = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_a ,_a )
def __lowercase ( self : Union[str, Any] ,_a : int ,_a : list ):
'''simple docstring'''
_a : List[Any] = True
print(_a ,end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_a ,_a )
if __name__ == "__main__":
__lowerCAmelCase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 271 | 1 |
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__lowerCAmelCase = logging.getLogger(__name__)
__lowerCAmelCase = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowercase__ )} , )
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''The input training data file (a text file).'''} )
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
__UpperCAmelCase : Optional[str] = field(
default=lowercase__ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
__UpperCAmelCase : bool = field(
default=lowercase__ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
__UpperCAmelCase : bool = field(
default=lowercase__ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
__UpperCAmelCase : bool = field(default=lowercase__ , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
__UpperCAmelCase : float = field(
default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__UpperCAmelCase : float = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
__UpperCAmelCase : int = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
__UpperCAmelCase : int = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
__UpperCAmelCase : bool = field(
default=lowercase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def UpperCAmelCase_ (__a : DataTrainingArguments , __a : PreTrainedTokenizer , __a : bool = False , __a : Optional[str] = None , ):
"""simple docstring"""
def _dataset(__a : Any , __a : int=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask' )
return LineByLineWithRefDataset(
tokenizer=__a , file_path=__a , block_size=args.block_size , ref_path=__a , )
return LineByLineTextDataset(tokenizer=__a , file_path=__a , block_size=args.block_size )
else:
return TextDataset(
tokenizer=__a , file_path=__a , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=__a , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(__a ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def UpperCAmelCase_ ():
"""simple docstring"""
_a : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_a, _a, _a : int = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
'or remove the --do_eval argument.' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , __a )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_a : List[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_a : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_a : Tuple = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.tokenizer_name:
_a : int = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_a : Any = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
' script, save it, and load it from here, using --tokenizer_name' )
if model_args.model_name_or_path:
_a : Any = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__a , cache_dir=model_args.cache_dir , )
else:
logger.info('Training new model from scratch' )
_a : Tuple = AutoModelWithLMHead.from_config(__a )
model.resize_token_embeddings(len(__a ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
'--mlm flag (masked language modeling).' )
if data_args.block_size <= 0:
_a : List[Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_a : Any = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_a : List[str] = (
get_dataset(__a , tokenizer=__a , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_a : int = (
get_dataset(__a , tokenizer=__a , evaluate=__a , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_a : Any = DataCollatorForPermutationLanguageModeling(
tokenizer=__a , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_a : Optional[int] = DataCollatorForWholeWordMask(
tokenizer=__a , mlm_probability=data_args.mlm_probability )
else:
_a : int = DataCollatorForLanguageModeling(
tokenizer=__a , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_a : Dict = Trainer(
model=__a , args=__a , data_collator=__a , train_dataset=__a , eval_dataset=__a , prediction_loss_only=__a , )
# Training
if training_args.do_train:
_a : str = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=__a )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a : Optional[int] = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_a : int = trainer.evaluate()
_a : Optional[Any] = math.exp(eval_output['eval_loss'] )
_a : List[Any] = {'perplexity': perplexity}
_a : List[str] = os.path.join(training_args.output_dir , 'eval_results_lm.txt' )
if trainer.is_world_master():
with open(__a , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key in sorted(result.keys() ):
logger.info(' %s = %s' , __a , str(result[key] ) )
writer.write('%s = %s\n' % (key, str(result[key] )) )
results.update(__a )
return results
def UpperCAmelCase_ (__a : Optional[int] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 271 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = """▁"""
__lowerCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
__lowerCAmelCase = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
__lowerCAmelCase = {"""vinai/bartpho-syllable""": 1_0_2_4}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self : str ,_a : str ,_a : Any ,_a : Any="<s>" ,_a : Dict="</s>" ,_a : int="</s>" ,_a : Union[str, Any]="<s>" ,_a : List[Any]="<unk>" ,_a : Optional[Any]="<pad>" ,_a : List[str]="<mask>" ,_a : Optional[Dict[str, Any]] = None ,**_a : int ,):
'''simple docstring'''
_a : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
_a : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
_a : Optional[int] = vocab_file
_a : Union[str, Any] = monolingual_vocab_file
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_a : Union[str, Any] = {}
_a : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_a ) not in self.fairseq_tokens_to_ids:
_a : int = cnt
cnt += 1
with open(_a ,'r' ,encoding='utf-8' ) as f:
for line in f.readlines():
_a : str = line.strip().split()[0]
_a : Tuple = len(self.fairseq_tokens_to_ids )
if str(_a ) not in self.fairseq_tokens_to_ids:
_a : List[str] = len(self.fairseq_tokens_to_ids )
_a : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Union[str, Any] ):
'''simple docstring'''
_a : int = self.__dict__.copy()
_a : str = None
_a : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple ,_a : Tuple ):
'''simple docstring'''
_a : Tuple = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : List[str] = {}
_a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __lowercase ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_a : Dict = [self.cls_token_id]
_a : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowercase ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def __lowercase ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
_a : List[str] = [self.sep_token_id]
_a : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowercase ( self : Tuple ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def __lowercase ( self : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __lowercase ( self : Any ,_a : int ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __lowercase ( self : Tuple ,_a : Union[str, Any] ):
'''simple docstring'''
_a : str = ''.join(_a ).replace(_a ,' ' ).strip()
return out_string
def __lowercase ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : int = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_a : int = os.path.join(
_a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,)
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,'wb' ) as fi:
_a : List[Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_a ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file ,_a )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_a ,'w' ,encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(_a )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 271 | 1 |
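A hedged usage sketch for the tokenizer above: it is sentencepiece-backed, so the sentencepiece package must be installed, and the checkpoint name comes from the pretrained map in the file. The sample sentence is illustrative:

from transformers import AutoTokenizer

# resolves to the BartPho tokenizer defined above
tokenizer = AutoTokenizer.from_pretrained('vinai/bartpho-syllable')
enc = tokenizer('Chúng tôi là những nghiên cứu viên.', return_tensors='pt')
print(enc['input_ids'].shape)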
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : int = UNetaDModel(
block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
return model
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
_a : str = self.dummy_uncond_unet
_a : int = PNDMScheduler()
_a : str = PNDMPipeline(unet=_a ,scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
_a : Optional[int] = torch.manual_seed(0 )
_a : Optional[Any] = pndm(generator=_a ,num_inference_steps=20 ,output_type='numpy' ).images
_a : List[str] = torch.manual_seed(0 )
_a : Any = pndm(generator=_a ,num_inference_steps=20 ,output_type='numpy' ,return_dict=_a )[0]
_a : List[Any] = image[0, -3:, -3:, -1]
_a : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : List[Any] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Tuple ):
'''simple docstring'''
_a : List[str] = 'google/ddpm-cifar10-32'
_a : str = UNetaDModel.from_pretrained(_a )
_a : Union[str, Any] = PNDMScheduler()
_a : Tuple = PNDMPipeline(unet=_a ,scheduler=_a )
pndm.to(_a )
pndm.set_progress_bar_config(disable=_a )
_a : str = torch.manual_seed(0 )
_a : Optional[Any] = pndm(generator=_a ,output_type='numpy' ).images
_a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : Tuple = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 271 |
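The slow test above assembles the pipeline by hand from a pretrained UNet and a fresh PNDM scheduler. The same construction with the real (un-obfuscated) diffusers names, sketched under the assumption that diffusers and the checkpoint are available:

import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained('google/ddpm-cifar10-32')
pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())

generator = torch.manual_seed(0)
image = pipe(generator=generator, num_inference_steps=20, output_type='numpy').images[0]
print(image.shape)  # (32, 32, 3)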
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : List[Any] = None
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a ,'feature_size' ) )
self.assertTrue(hasattr(_a ,'sampling_rate' ) )
self.assertTrue(hasattr(_a ,'padding_value' ) )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_tester.prepare_inputs_for_common()
_a : str = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a ,processed_features[input_name] ) ) )
_a : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Union[str, Any] = BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
_a : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : str = BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
_a : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = feat_extract.model_input_names[0]
_a : int = BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
_a : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __lowercase ( self : Dict ,_a : Any=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : Tuple ):
_a : Tuple = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : Optional[Any] ,_a : Union[str, Any] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : int = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Tuple = BatchFeature({input_name: speech_inputs} )
_a : str = self.feat_extract_tester.seq_length_diff
_a : Dict = self.feat_extract_tester.max_seq_length + pad_diff
_a : Dict = self.feat_extract_tester.min_seq_length
_a : Optional[Any] = self.feat_extract_tester.batch_size
_a : Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_a : int = feat_extract.pad(_a ,padding=_a )
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(_a ,padding='longest' )
_a : Any = input_a[input_name]
_a : Optional[Any] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
_a : List[str] = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
_a : str = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' )[input_name]
_a : int = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,return_tensors='np' )
_a : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_a : Tuple = feat_extract.pad(_a ,pad_to_multiple_of=10 )
_a : List[str] = input_a[input_name]
_a : str = feat_extract.pad(_a ,padding='longest' ,pad_to_multiple_of=10 )
_a : Tuple = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a )
_a : Any = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a ,return_tensors='np' ,)
_a : Dict = input_a[input_name]
self.assertTrue(all(len(_a ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
_a : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_a ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_a : Any = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def __lowercase ( self : List[Any] ,_a : Optional[int]=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : List[str] ):
_a : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : List[str] ,_a : List[str] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : str = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Any = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_a : Union[str, Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=_a )
_a : str = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
_a : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to smallest with np
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=_a ,)
_a : Any = input_a[input_name]
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
_a : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than the longest input,
# the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to middle
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a ,return_tensors='np' ,)
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a )
_a : Tuple = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
_a : Dict = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
# since truncation forces padding to be smaller than the longest input,
# the function can't return an `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' ,truncation=_a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_a : Optional[Any] = 12
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,truncation=_a ,)
_a : Tuple = input_a[input_name]
_a : str = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,)
_a : List[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_a : List[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_a : Union[str, Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
self._check_truncation(numpify=_a )
def __lowercase ( self : str ):
'''simple docstring'''
self._check_truncation(numpify=_a )
@require_torch
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Optional[int] = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : Dict = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : Any = feat_extract.pad(_a ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
_a : str = self.feat_extract_dict
_a : List[Any] = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Tuple = [len(_a ) for x in speech_inputs]
_a : int = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : str = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,_a )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_dict
_a : Tuple = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : Dict = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = [len(_a ) for x in speech_inputs]
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Any = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = min(_a )
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,truncation=_a ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
| 271 | 1 |
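Several assertions above hinge on the pad_to_multiple_of arithmetic: the padded length is the smallest multiple of m at or above the target length, computed as (length // m + 1) * m when length is not already a multiple. A two-line sketch of that computation:

# expected padded length under pad_to_multiple_of, as the tests compute it
def padded_length(length: int, multiple: int) -> int:
    return length if length % multiple == 0 else (length // multiple + 1) * multiple

assert padded_length(20, 10) == 20
assert padded_length(23, 10) == 30
assert padded_length(7, 12) == 12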