| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
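The columns pair a code snippet and a style-context snippet, each tagged with a codestyle id, plus a binary label. As a minimal sketch only (the dataset identifier below is a placeholder, not a real name), one row of a dataset with this schema could be inspected with the `datasets` library:

```python
# Hypothetical sketch: load and inspect one row of a dataset with the schema above.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # placeholder dataset id
row = ds[0]
print(len(row["code"]), row["code_codestyle"])  # snippet length and its codestyle id
print(len(row["style_context"]), row["style_context_codestyle"], row["label"])
```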
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit, so they are stored externally
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    # verify that the exported pipeline can be loaded back with onnxruntime
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()

    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
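A brief usage sketch follows (an editorial addition, not part of the script): the file name and checkpoint id below are placeholders, while the CLI flags and the `OnnxStableDiffusionPipeline` loading call are the ones defined above.

```python
# Example invocation (file name and checkpoint id are placeholders):
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 --output_path ./sd_onnx --opset 14
#
# Reload the exported pipeline with onnxruntime on CPU and run one prompt.
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd_onnx", provider="CPUExecutionProvider")
image = pipe("A sample prompt", num_inference_steps=25).images[0]
image.save("sample.png")
```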
| 250
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 250
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def _lowerCamelCase ( self , _snake_case ):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__lowerCamelCase = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sshleifer/tiny-gpt2'''
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_snake_case , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case )
__lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sgugger/tiny-distilbert-classification'''
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case )
__lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sshleifer/tiny-gpt2'''
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case )
__lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sshleifer/tiny-gpt2'''
__lowerCamelCase = AutoConfig.from_pretrained(_snake_case )
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_snake_case , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case , [config] )
__lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sshleifer/tiny-gpt2'''
__lowerCamelCase = AutoConfig.from_pretrained(_snake_case )
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case , [config] )
__lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sshleifer/tiny-gpt2'''
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case )
__lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sshleifer/tiny-gpt2'''
__lowerCamelCase = AutoConfig.from_pretrained(_snake_case )
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case , [config] )
__lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''patrickvonplaten/t5-tiny-random'''
__lowerCamelCase = AutoConfig.from_pretrained(_snake_case )
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case , configs=[config] )
__lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sshleifer/tiny-gpt2'''
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_snake_case , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case )
__lowerCamelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(_snake_case , '''env.csv''' ) , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case )
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''' ) ).exists() )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case ):
self.assertTrue(hasattr(_snake_case , '''sequential''' ) )
self.assertTrue(hasattr(_snake_case , '''cumulative''' ) )
self.assertTrue(hasattr(_snake_case , '''current''' ) )
self.assertTrue(hasattr(_snake_case , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''' ) , log_print=_snake_case , trace_memory_line_by_line=_snake_case , eager_mode=_snake_case , multi_process=_snake_case , )
__lowerCamelCase = TensorFlowBenchmark(_snake_case )
__lowerCamelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''' ) ).exists() )
| 575
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 575
| 1
|
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
UpperCamelCase =logging.getLogger(__name__)
UpperCamelCase =list(MODEL_WITH_LM_HEAD_MAPPING.keys())
UpperCamelCase =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A :
"""simple docstring"""
__a : Dict = field(
default=a__, metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
}, )
__a : Any = field(
default=a__, metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(a__ )}, )
__a : str = field(
default=a__, metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__a : Dict = field(
default=a__, metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__a : Any = field(
default=a__, metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''}, )
@dataclass
class A :
"""simple docstring"""
__a : Optional[Any] = field(
default=a__, metadata={'''help''': '''The input training data file (a text file).'''} )
__a : Any = field(
default=a__, metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
}, )
__a : List[Any] = field(
default=a__, metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''}, )
__a : int = field(
default=a__, metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''}, )
__a : Optional[Any] = field(
default=a__, metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''}, )
__a : Dict = field(
default=a__, metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''}, )
__a : Union[str, Any] = field(
default=a__, metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
__a : List[Any] = field(default=a__, metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
__a : List[Any] = field(
default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
__a : int = field(
default=1 / 6, metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
}, )
__a : Dict = field(
default=5, metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
__a : Union[str, Any] = field(
default=-1, metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
}, )
__a : Any = field(
default=a__, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def snake_case ( a_ : DataTrainingArguments , a_ : PreTrainedTokenizer , a_ : bool = False , a_ : Optional[str] = None , ) -> Tuple:
"""simple docstring"""
def _dataset(a_ : Union[str, Any] , a_ : int=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=UpperCAmelCase__ , file_path=UpperCAmelCase__ , block_size=args.block_size , ref_path=UpperCAmelCase__ , )
return LineByLineTextDataset(tokenizer=UpperCAmelCase__ , file_path=UpperCAmelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=UpperCAmelCase__ , file_path=UpperCAmelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCAmelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(UpperCAmelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCamelCase_ : Optional[int] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCamelCase_ : List[Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase_ : Tuple = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCamelCase_ : Optional[Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
UpperCamelCase_ : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
UpperCamelCase_ : str = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase_ : Tuple = AutoModelWithLMHead.from_config(UpperCAmelCase__ )
model.resize_token_embeddings(len(UpperCAmelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
UpperCamelCase_ : Union[str, Any] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCamelCase_ : Optional[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCamelCase_ : int = (
get_dataset(UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCamelCase_ : List[Any] = (
get_dataset(UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , evaluate=UpperCAmelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
UpperCamelCase_ : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=UpperCAmelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCamelCase_ : Optional[int] = DataCollatorForWholeWordMask(
tokenizer=UpperCAmelCase__ , mlm_probability=data_args.mlm_probability )
else:
UpperCamelCase_ : Any = DataCollatorForLanguageModeling(
tokenizer=UpperCAmelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCamelCase_ : Optional[int] = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , prediction_loss_only=UpperCAmelCase__ , )
# Training
if training_args.do_train:
UpperCamelCase_ : Union[str, Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=UpperCAmelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCamelCase_ : List[str] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase_ : Dict = trainer.evaluate()
UpperCamelCase_ : int = math.exp(eval_output["""eval_loss"""] )
UpperCamelCase_ : str = {'perplexity': perplexity}
UpperCamelCase_ : Tuple = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , UpperCAmelCase__ , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(UpperCAmelCase__ )
return results
def snake_case ( a_ : List[str] ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 208
|
'''simple docstring'''
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if `n` contains each of the digits 1-9 exactly once."""
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    """Find the largest 1-9 pandigital number formed by concatenating a base with its multiples."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num  # concatenation of base_num and 2 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num  # concatenation of base_num, 2 * base_num and 3 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f"""{solution() = }""")
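For reference (an editorial note, not part of the original snippet), the first loop already yields the known answer to this search, since 100002 × 9327 is just 9327 concatenated with 2 × 9327:

```python
# 9327 * 1 = 9327 and 9327 * 2 = 18654; concatenated they give 932718654,
# which uses each digit 1-9 exactly once.
assert 100002 * 9327 == 932718654
assert is_9_pandigital(932718654)
assert solution() == 932718654
```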
| 320
| 0
|
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split `x` into sentences with nltk and put each sentence on its own line."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
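A small usage sketch (an addition, assuming nltk's punkt data has been downloaded as above):

```python
# Example: nltk splits the paragraph into sentences, one per line.
text = "This is the first sentence. Here is the second one."
print(add_newline_to_end_of_each_sentence(text))
# This is the first sentence.
# Here is the second one.
```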
| 717
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowerCAmelCase_ : str = logging.get_logger(__name__)
lowerCAmelCase_ : List[Any] = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
lowerCAmelCase_ : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def UpperCAmelCase ( A : str ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE : Tuple = model_type_to_module_name(A )
SCREAMING_SNAKE_CASE : List[Any] = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(A , A )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(A , '''__name__''' , A ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE : List[str] = importlib.import_module('''transformers''' )
if hasattr(A , A ):
return getattr(A , A )
return None
def UpperCAmelCase ( A : Union[str, os.PathLike] , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , A : bool = False , A : Optional[Dict[str, str]] = None , A : Optional[Union[bool, str]] = None , A : Optional[str] = None , A : bool = False , **A : List[str] , ):
SCREAMING_SNAKE_CASE : List[Any] = get_file_from_repo(
A , A , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(A , encoding='''utf-8''' ) as reader:
return json.load(A )
class lowerCamelCase_ :
def __init__( self : Tuple ):
"""simple docstring"""
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase__ )
def __lowercase ( cls : int , lowerCAmelCase__ : Optional[int] , **lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = kwargs.pop('''config''' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''trust_remote_code''' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : str = ImageProcessingMixin.get_image_processor_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = config_dict.get('''image_processor_type''' , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : str = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE : List[Any] = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
SCREAMING_SNAKE_CASE : Optional[Any] = config_dict.pop('''feature_extractor_type''' , lowerCAmelCase__ )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
SCREAMING_SNAKE_CASE : str = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
# It could be in `config.image_processor_type``
SCREAMING_SNAKE_CASE : Any = getattr(lowerCAmelCase__ , '''image_processor_type''' , lowerCAmelCase__ )
if hasattr(lowerCAmelCase__ , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
SCREAMING_SNAKE_CASE : Tuple = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
SCREAMING_SNAKE_CASE : List[str] = image_processor_class_from_name(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = image_processor_auto_map is not None
SCREAMING_SNAKE_CASE : str = image_processor_class is not None or type(lowerCAmelCase__ ) in IMAGE_PROCESSOR_MAPPING
SCREAMING_SNAKE_CASE : Union[str, Any] = resolve_trust_remote_code(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE : List[Any] = get_class_from_dynamic_module(
lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop('''code_revision''' , lowerCAmelCase__ )
if os.path.isdir(lowerCAmelCase__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(lowerCAmelCase__ ) in IMAGE_PROCESSOR_MAPPING:
SCREAMING_SNAKE_CASE : List[str] = IMAGE_PROCESSOR_MAPPING[type(lowerCAmelCase__ )]
return image_processor_class.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
IMAGE_PROCESSOR_MAPPING.register(lowerCAmelCase__ , lowerCAmelCase__ )
| 464
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = tempfile.mkdtemp()
# fmt: off
lowerCAmelCase_ = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
lowerCAmelCase_ = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
lowerCAmelCase_ = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
lowerCAmelCase_ = {'''unk_token''': '''<unk>'''}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
lowerCAmelCase_ = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
lowerCAmelCase_ = os.path.join(self.tmpdirname , _lowerCamelCase )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_lowerCamelCase , _lowerCamelCase )
def UpperCAmelCase_ ( self , **_lowerCamelCase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **_lowerCamelCase )
def UpperCAmelCase_ ( self , **_lowerCamelCase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **_lowerCamelCase )
def UpperCAmelCase_ ( self , **_lowerCamelCase ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def UpperCAmelCase_ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase_ = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase )
lowerCAmelCase_ = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCAmelCase_ = self.get_image_processor(do_normalize=_lowerCamelCase )
lowerCAmelCase_ = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _lowerCamelCase )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = image_processor(_lowerCamelCase , return_tensors='''np''' )
lowerCAmelCase_ = processor(images=_lowerCamelCase , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
lowerCAmelCase_ = '''lower newer'''
lowerCAmelCase_ = processor(text=_lowerCamelCase , return_tensors='''np''' )
lowerCAmelCase_ = tokenizer(_lowerCamelCase , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
lowerCAmelCase_ = '''lower newer'''
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(text=_lowerCamelCase , images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''google/owlvit-base-patch32'''
lowerCAmelCase_ = OwlViTProcessor.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = ['''cat''', '''nasa badge''']
lowerCAmelCase_ = processor(text=_lowerCamelCase )
lowerCAmelCase_ = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''google/owlvit-base-patch32'''
lowerCAmelCase_ = OwlViTProcessor.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = [['''cat''', '''nasa badge'''], ['''person''']]
lowerCAmelCase_ = processor(text=_lowerCamelCase )
lowerCAmelCase_ = 16
lowerCAmelCase_ = len(_lowerCamelCase )
lowerCAmelCase_ = max([len(_lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''google/owlvit-base-patch32'''
lowerCAmelCase_ = OwlViTProcessor.from_pretrained(_lowerCamelCase )
lowerCAmelCase_ = ['''cat''', '''nasa badge''']
lowerCAmelCase_ = processor(text=_lowerCamelCase )
lowerCAmelCase_ = 16
lowerCAmelCase_ = inputs['''input_ids''']
lowerCAmelCase_ = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = self.prepare_image_inputs()
lowerCAmelCase_ = processor(images=_lowerCamelCase , query_images=_lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_lowerCamelCase ):
processor()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.get_image_processor()
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = OwlViTProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase )
lowerCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase_ = processor.batch_decode(_lowerCamelCase )
lowerCAmelCase_ = tokenizer.batch_decode(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
| 274
|
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence from standard UK coin denominations."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
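As a quick sanity check of the recurrence (an addition, not in the original snippet), a small target can be verified by hand:

```python
# 5 pence can be formed in exactly 4 ways: 5, 2+2+1, 2+1+1+1, 1+1+1+1+1.
assert solution(5) == 4
```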
| 274
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
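For illustration only (not part of the configuration file above), a config of this kind is typically instantiated and handed to the matching model class; the sketch below assumes the public `transformers` API for CvT:

```python
# Build a CvT-13-like configuration and instantiate an untrained model from it.
from transformers import CvtConfig, CvtModel

config = CvtConfig(embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10])
model = CvtModel(config)
print(model.config.model_type)  # "cvt"
```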
| 621
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 621
| 1
|
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
__lowerCamelCase : Optional[int] = "sshleifer/mar_enro_6_3_student"
class __magic_name__ ( A__ ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
super().setUp()
UpperCAmelCase = cached_path(
"https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz" , extract_compressed_file=UpperCamelCase__ , )
UpperCAmelCase = F'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
'''simple docstring'''
MarianMTModel.from_pretrained(UpperCamelCase__ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = {
"$MAX_LEN": 64,
"$BS": 64,
"$GAS": 1,
"$ENRO_DIR": self.data_dir,
"facebook/mbart-large-cc25": MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
"--learning_rate=3e-5": "--learning_rate 3e-4",
"--num_train_epochs 6": "--num_train_epochs 1",
}
# Clean up bash script
UpperCAmelCase = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py" )[1].strip()
UpperCAmelCase = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(UpperCamelCase__ , str(UpperCamelCase__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
UpperCAmelCase = F'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
UpperCAmelCase = ["finetune.py"] + bash_script.split() + args
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(UpperCamelCase__ )
UpperCAmelCase = SummarizationModule.add_model_specific_args(UpperCamelCase__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = main(UpperCamelCase__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics["val"][0]
UpperCAmelCase = metrics["val"][-1]
self.assertEqual(len(metrics["val"] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'val_avg_{model.val_metric}'] , UpperCamelCase__ )
self.assertGreater(last_step_stats["val_avg_gen_time"] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats["val_avg_gen_time"] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats["val_avg_bleu"] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(UpperCamelCase__ )
UpperCAmelCase = [x for x in contents if x.endswith(".ckpt" )][0]
UpperCAmelCase = os.path.join(args.output_dir , UpperCamelCase__ )
UpperCAmelCase = torch.load(UpperCamelCase__ , map_location="cpu" )
UpperCAmelCase = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCAmelCase = {os.path.basename(UpperCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
'''simple docstring'''
UpperCAmelCase = F'{self.test_file_dir_str}/test_data/wmt_en_ro'
UpperCAmelCase = {
"--fp16_opt_level=O1": "",
"$MAX_LEN": 1_28,
"$BS": 16,
"$GAS": 1,
"$ENRO_DIR": data_dir,
"$m": "sshleifer/student_marian_en_ro_6_1",
"val_check_interval=0.25": "val_check_interval=1.0",
}
# Clean up bash script
UpperCAmelCase = (
(self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py" )[1].strip()
)
UpperCAmelCase = bash_script.replace("\\\n" , "" ).strip().replace("\"$@\"" , "" )
UpperCAmelCase = bash_script.replace("--fp16 " , " " )
for k, v in env_vars_to_replace.items():
UpperCAmelCase = bash_script.replace(UpperCamelCase__ , str(UpperCamelCase__ ) )
UpperCAmelCase = self.get_auto_remove_tmp_dir()
UpperCAmelCase = bash_script.replace("--fp16" , "" )
UpperCAmelCase = 6
UpperCAmelCase = (
["distillation.py"]
+ bash_script.split()
+ [
F'--output_dir={output_dir}',
"--gpus=1",
"--learning_rate=1e-3",
F'--num_train_epochs={epochs}',
"--warmup_steps=10",
"--val_check_interval=1.0",
"--do_predict",
]
)
with patch.object(UpperCamelCase__ , "argv" , UpperCamelCase__ ):
UpperCAmelCase = argparse.ArgumentParser()
UpperCAmelCase = pl.Trainer.add_argparse_args(UpperCamelCase__ )
UpperCAmelCase = SummarizationDistiller.add_model_specific_args(UpperCamelCase__ , os.getcwd() )
UpperCAmelCase = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
UpperCAmelCase = distill_main(UpperCamelCase__ )
# Check metrics
UpperCAmelCase = load_json(model.metrics_save_path )
UpperCAmelCase = metrics["val"][0]
UpperCAmelCase = metrics["val"][-1]
assert len(metrics["val"] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'val_avg_{model.val_metric}'] , UpperCamelCase__ )
# check lightning ckpt can be loaded and has a reasonable statedict
UpperCAmelCase = os.listdir(UpperCamelCase__ )
UpperCAmelCase = [x for x in contents if x.endswith(".ckpt" )][0]
UpperCAmelCase = os.path.join(args.output_dir , UpperCamelCase__ )
UpperCAmelCase = torch.load(UpperCamelCase__ , map_location="cpu" )
UpperCAmelCase = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
UpperCAmelCase = {os.path.basename(UpperCamelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics["test"] ) == 1
| 323
|
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
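    # Hedged sanity check (added example, not part of the original file):
    # the first five rows of Pascal's triangle.
    assert generate_pascal_triangle(5) == [
        [1],
        [1, 1],
        [1, 2, 1],
        [1, 3, 3, 1],
        [1, 4, 6, 4, 1],
    ]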
| 323
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 99
|
def method_1(boundary: list[float], steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a: float, b: float, h: float):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
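    # Hedged extra check (assumed usage): with a finer step count the estimate
    # should approach the exact integral of x^2 over [0, 1], which is 1/3.
    print(f"y with 1000 steps = {method_1([0.0, 1.0], 1000.0)}")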
| 99
| 1
|
'''simple docstring'''
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
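    # Hedged example (assumed IDs): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
    # so "12345678Z" validates while "12345678A" does not.
    print(is_spain_national_id("12345678Z"))  # True
    print(is_spain_national_id("12345678A"))  # False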
| 497
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]

    # copy the input matrix and vector into an augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(f'''{solution() = }''')
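    # Hedged cross-check (example taken from the Project Euler 101 statement):
    # for the cube generating function u(n) = n**3 the FITs are 1, 15 and 58, summing to 74.
    print(solution(lambda n: n**3, 3))  # 74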
| 497
| 1
|
from __future__ import annotations
__a = 1.6_0_2_1E-1_9 # units = C
def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
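    # Hedged example (assumed values): with electron_conc == 0 the function returns
    # the name and value of the missing quantity computed from the other two inputs.
    print(electric_conductivity(conductivity=25, electron_conc=0, mobility=1.2))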
| 300
|
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
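    # Hedged shape check (added example, no image file needed): the filtered output
    # shrinks by k_size - 1 pixels along each axis.
    assert gaussian_filter(zeros((10, 10)), 3, sigma=1).shape == (8, 8)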
| 300
| 1
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
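# Hedged usage note (not part of the original module): this builder is normally driven
# indirectly, e.g. via `datasets.load_dataset("parquet", data_files={"train": "data/*.parquet"})`
# (illustrative path), rather than being instantiated by hand.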
| 42
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Any ):
A__ = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
A__ = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
A__ = F"""{src_lang}-{tgt_lang}"""
A__ = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase )
A__ = os.path.join(UpperCamelCase , """README.md""" )
print(F"""Generating {path}""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(UpperCamelCase )
# make sure we are under the root of the project
lowerCamelCase__ = Path(__file__).resolve().parent.parent.parent
lowerCamelCase__ = repo_dir / "model_cards"
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = model_name.split("-")
lowerCamelCase__ = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 574
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")
        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer
    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
'''simple docstring'''
snake_case: str = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case: Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case: int = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
snake_case: List[Any] = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
    def test_image_processor(self):
'''simple docstring'''
snake_case: List[str] = self.get_image_processor()
snake_case: str = self.get_tokenizer()
snake_case: Tuple = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case: int = self.prepare_image_inputs()
snake_case: int = image_processor(snake_case_ , return_tensors="""np""" )
snake_case: List[str] = processor(images=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer(self):
'''simple docstring'''
snake_case: Optional[Any] = self.get_image_processor()
snake_case: int = self.get_tokenizer()
snake_case: List[str] = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case: Dict = """lower newer"""
snake_case: Dict = processor(text=snake_case_ )
snake_case: Dict = tokenizer(snake_case_ , return_token_type_ids=snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self):
'''simple docstring'''
snake_case: List[str] = self.get_image_processor()
snake_case: Optional[Any] = self.get_tokenizer()
snake_case: Dict = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case: Union[str, Any] = """lower newer"""
snake_case: Any = self.prepare_image_inputs()
snake_case: Any = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
    def test_tokenizer_decode(self):
'''simple docstring'''
snake_case: str = self.get_image_processor()
snake_case: Union[str, Any] = self.get_tokenizer()
snake_case: Tuple = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case: Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case: Union[str, Any] = processor.batch_decode(snake_case_ )
snake_case: Optional[int] = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
    def test_model_input_names(self):
'''simple docstring'''
snake_case: Dict = self.get_image_processor()
snake_case: str = self.get_tokenizer()
snake_case: List[str] = BlipProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
snake_case: Union[str, Any] = """lower newer"""
snake_case: str = self.prepare_image_inputs()
snake_case: Tuple = processor(text=snake_case_ , images=snake_case_ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 720
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length",
        )
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
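# Hedged usage sketch (assumed wiring, not from the original file): once instantiated by the
# agents framework, a call roughly like
#   TextClassificationTool()("This is amazing!", labels=["positive", "negative"])
# pairs the text with "This example is <label>" hypotheses, runs the NLI model and returns
# the label with the highest entailment score.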
| 164
| 0
|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
def __init__( self: Tuple , __lowerCamelCase: AutoencoderKL , __lowerCamelCase: CLIPTextModel , __lowerCamelCase: CLIPModel , __lowerCamelCase: CLIPTokenizer , __lowerCamelCase: UNetaDConditionModel , __lowerCamelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , __lowerCamelCase: CLIPFeatureExtractor , __lowerCamelCase: Tuple=None , __lowerCamelCase: Any=None , __lowerCamelCase: Tuple=None , ) -> Optional[int]:
super().__init__()
self.register_modules(
vae=lowerCAmelCase_ , text_encoder=lowerCAmelCase_ , clip_model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ , unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , coca_model=lowerCAmelCase_ , coca_tokenizer=lowerCAmelCase_ , coca_transform=lowerCAmelCase_ , )
__UpperCAmelCase : Optional[int] = (
feature_extractor.size
if isinstance(feature_extractor.size , lowerCAmelCase_ )
else feature_extractor.size["shortest_edge"]
)
__UpperCAmelCase : Tuple = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , lowerCAmelCase_ )
set_requires_grad(self.clip_model , lowerCAmelCase_ )
def _lowerCamelCase ( self: List[str] , __lowerCamelCase: Optional[Union[str, int]] = "auto" ) -> Any:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__UpperCAmelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase_ )
def _lowerCamelCase ( self: Tuple ) -> Optional[Any]:
self.enable_attention_slicing(lowerCAmelCase_ )
def _lowerCamelCase ( self: Optional[int] ) -> List[str]:
set_requires_grad(self.vae , lowerCAmelCase_ )
def _lowerCamelCase ( self: str ) -> Tuple:
set_requires_grad(self.vae , lowerCAmelCase_ )
def _lowerCamelCase ( self: List[str] ) -> Dict:
set_requires_grad(self.unet , lowerCAmelCase_ )
def _lowerCamelCase ( self: Optional[Any] ) -> str:
set_requires_grad(self.unet , lowerCAmelCase_ )
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: List[str] , __lowerCamelCase: int , __lowerCamelCase: Optional[Any] ) -> str:
__UpperCAmelCase : Any = min(int(num_inference_steps * strength ) , lowerCAmelCase_ )
__UpperCAmelCase : List[str] = max(num_inference_steps - init_timestep , 0 )
__UpperCAmelCase : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowerCamelCase ( self: Optional[Any] , __lowerCamelCase: int , __lowerCamelCase: Optional[int] , __lowerCamelCase: int , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[Any]=None ) -> Union[str, Any]:
if not isinstance(lowerCAmelCase_ , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(lowerCAmelCase_ )}''' )
__UpperCAmelCase : List[Any] = image.to(device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__UpperCAmelCase : int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCAmelCase_ )
]
__UpperCAmelCase : List[str] = torch.cat(lowerCAmelCase_ , dim=0 )
else:
__UpperCAmelCase : str = self.vae.encode(lowerCAmelCase_ ).latent_dist.sample(lowerCAmelCase_ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__UpperCAmelCase : str = 0.1_82_15 * init_latents
__UpperCAmelCase : Any = init_latents.repeat_interleave(lowerCAmelCase_ , dim=0 )
__UpperCAmelCase : Any = randn_tensor(init_latents.shape , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
# get latents
__UpperCAmelCase : int = self.scheduler.add_noise(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCAmelCase : List[Any] = init_latents
return latents
def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Any ) -> Dict:
__UpperCAmelCase : List[Any] = self.coca_transform(lowerCAmelCase_ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__UpperCAmelCase : int = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__UpperCAmelCase : List[str] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )
def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Union[str, Any] ) -> str:
__UpperCAmelCase : int = self.feature_extractor.preprocess(lowerCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
__UpperCAmelCase : Any = self.clip_model.get_image_features(lowerCAmelCase_ )
__UpperCAmelCase : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase_ )
__UpperCAmelCase : int = image_embeddings_clip.repeat_interleave(lowerCAmelCase_ , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _lowerCamelCase ( self: Any , __lowerCamelCase: Optional[int] , __lowerCamelCase: Tuple , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict , ) -> str:
__UpperCAmelCase : Dict = latents.detach().requires_grad_()
__UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# predict the noise residual
__UpperCAmelCase : Any = self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__UpperCAmelCase : Any = self.scheduler.alphas_cumprod[timestep]
__UpperCAmelCase : Optional[Any] = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__UpperCAmelCase : Optional[int] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__UpperCAmelCase : Optional[Any] = torch.sqrt(lowerCAmelCase_ )
__UpperCAmelCase : int = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , lowerCAmelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.scheduler.sigmas[index]
__UpperCAmelCase : Optional[int] = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__UpperCAmelCase : Any = 1 / 0.1_82_15 * sample
__UpperCAmelCase : List[Any] = self.vae.decode(lowerCAmelCase_ ).sample
__UpperCAmelCase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCAmelCase : List[str] = transforms.Resize(self.feature_extractor_size )(lowerCAmelCase_ )
__UpperCAmelCase : str = self.normalize(lowerCAmelCase_ ).to(latents.dtype )
__UpperCAmelCase : Optional[int] = self.clip_model.get_image_features(lowerCAmelCase_ )
__UpperCAmelCase : Optional[int] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowerCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = spherical_dist_loss(lowerCAmelCase_ , lowerCAmelCase_ ).mean() * clip_guidance_scale
__UpperCAmelCase : Any = -torch.autograd.grad(lowerCAmelCase_ , lowerCAmelCase_ )[0]
if isinstance(self.scheduler , lowerCAmelCase_ ):
__UpperCAmelCase : Dict = latents.detach() + grads * (sigma**2)
__UpperCAmelCase : List[str] = noise_pred_original
else:
__UpperCAmelCase : List[str] = noise_pred_original - torch.sqrt(lowerCAmelCase_ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self: int , __lowerCamelCase: Union[torch.FloatTensor, PIL.Image.Image] , __lowerCamelCase: Union[torch.FloatTensor, PIL.Image.Image] , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[str] = None , __lowerCamelCase: Optional[int] = 5_12 , __lowerCamelCase: Optional[int] = 5_12 , __lowerCamelCase: float = 0.6 , __lowerCamelCase: Optional[int] = 50 , __lowerCamelCase: Optional[float] = 7.5 , __lowerCamelCase: Optional[int] = 1 , __lowerCamelCase: float = 0.0 , __lowerCamelCase: Optional[float] = 1_00 , __lowerCamelCase: Optional[torch.Generator] = None , __lowerCamelCase: Optional[str] = "pil" , __lowerCamelCase: bool = True , __lowerCamelCase: float = 0.8 , __lowerCamelCase: float = 0.1 , __lowerCamelCase: float = 0.1 , ) -> List[str]:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(lowerCAmelCase_ )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(lowerCAmelCase_ , torch.Generator ) and batch_size > 1:
__UpperCAmelCase : Tuple = [generator] + [None] * (batch_size - 1)
__UpperCAmelCase : List[Any] = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
__UpperCAmelCase : Union[str, Any] = [x[0] for x in coca_is_none if x[1]]
__UpperCAmelCase : List[Any] = ", ".join(lowerCAmelCase_ )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(lowerCAmelCase_ ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__UpperCAmelCase : Tuple = self.get_image_description(lowerCAmelCase_ )
if style_prompt is None:
if len(lowerCAmelCase_ ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
__UpperCAmelCase : int = self.get_image_description(lowerCAmelCase_ )
# get prompt text embeddings for content and style
__UpperCAmelCase : Optional[Any] = self.tokenizer(
lowerCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="pt" , )
__UpperCAmelCase : int = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
__UpperCAmelCase : str = self.tokenizer(
lowerCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowerCAmelCase_ , return_tensors="pt" , )
__UpperCAmelCase : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
__UpperCAmelCase : Tuple = slerp(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# duplicate text embeddings for each generation per prompt
__UpperCAmelCase : Union[str, Any] = text_embeddings.repeat_interleave(lowerCAmelCase_ , dim=0 )
# set timesteps
__UpperCAmelCase : List[Any] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
__UpperCAmelCase : List[str] = {}
if accepts_offset:
__UpperCAmelCase : Any = 1
self.scheduler.set_timesteps(lowerCAmelCase_ , **lowerCAmelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.get_timesteps(lowerCAmelCase_ , lowerCAmelCase_ , self.device )
__UpperCAmelCase : List[str] = timesteps[:1].repeat(lowerCAmelCase_ )
# Preprocess image
__UpperCAmelCase : str = preprocess(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCAmelCase : Optional[int] = self.prepare_latents(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text_embeddings.dtype , self.device , lowerCAmelCase_ )
__UpperCAmelCase : Optional[int] = preprocess(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = self.prepare_latents(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , text_embeddings.dtype , self.device , lowerCAmelCase_ )
__UpperCAmelCase : str = slerp(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if clip_guidance_scale > 0:
__UpperCAmelCase : Dict = self.get_clip_image_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCAmelCase : List[str] = self.get_clip_image_embeddings(lowerCAmelCase_ , lowerCAmelCase_ )
__UpperCAmelCase : Union[str, Any] = slerp(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__UpperCAmelCase : List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__UpperCAmelCase : int = content_text_input.input_ids.shape[-1]
__UpperCAmelCase : List[str] = self.tokenizer([""] , padding="max_length" , max_length=lowerCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
__UpperCAmelCase : Dict = uncond_embeddings.repeat_interleave(lowerCAmelCase_ , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__UpperCAmelCase : int = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__UpperCAmelCase : int = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
__UpperCAmelCase : List[str] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
__UpperCAmelCase : Tuple = torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device="cpu" , dtype=lowerCAmelCase_ ).to(
self.device )
else:
__UpperCAmelCase : Dict = torch.randn(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device , dtype=lowerCAmelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__UpperCAmelCase : Any = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__UpperCAmelCase : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__UpperCAmelCase : Dict = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__UpperCAmelCase : List[str] = {}
if accepts_eta:
__UpperCAmelCase : str = eta
# check if the scheduler accepts generator
__UpperCAmelCase : Optional[Any] = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
__UpperCAmelCase : Union[str, Any] = generator
with self.progress_bar(total=lowerCAmelCase_ ):
for i, t in enumerate(lowerCAmelCase_ ):
# expand the latents if we are doing classifier free guidance
__UpperCAmelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__UpperCAmelCase : int = self.scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_ )
# predict the noise residual
__UpperCAmelCase : Dict = self.unet(lowerCAmelCase_ , lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
__UpperCAmelCase , __UpperCAmelCase : int = noise_pred.chunk(2 )
__UpperCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
__UpperCAmelCase : int = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.cond_fn(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
# compute the previous noisy sample x_t -> x_t-1
__UpperCAmelCase : List[Any] = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
__UpperCAmelCase : int = 1 / 0.1_82_15 * latents
__UpperCAmelCase : Optional[Any] = self.vae.decode(lowerCAmelCase_ ).sample
__UpperCAmelCase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
__UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase : Optional[int] = self.numpy_to_pil(lowerCAmelCase_ )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=lowerCAmelCase_ , nsfw_content_detected=lowerCAmelCase_ )
| 382
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
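    # Hedged example (assumed values): with voltage unknown, 2 A through 3 ohms
    # should yield {'voltage': 6.0}.
    print(ohms_law(voltage=0, current=2, resistance=3))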
| 393
| 0
|
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Perform circular convolution of two discrete-time signals."""

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
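    # Hedged example (signals taken from the constructor defaults above): the circular
    # convolution of [2, 1, 2, -1] with [1, 2, 3, 4] is expected to be [10, 10, 6, 14].
    print(CircularConvolution().circular_convolution())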
| 445
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ : Dict = logging.get_logger(__name__)
a_ : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Optional[Any] = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
a_ : str = {
"""gpt-neox-20b""": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 445
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def SCREAMING_SNAKE_CASE_ ( _snake_case :OrderedDict , _snake_case :int ) -> Tuple[Dict, Dict]:
_A = list(state_dict.keys() )
_A = {}
for key in keys:
_A = state_dict.pop(_snake_case )
_A = rename_keys(_snake_case )
if "in_proj_weight" in key:
# split fused qkv proj
_A = val[:hidden_size, :]
_A = val[hidden_size : 2 * hidden_size, :]
_A = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_A = val
else:
_A = val
return state_dict, enc_dec_proj_state_dict
def SCREAMING_SNAKE_CASE_ ( _snake_case :str ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
_A = 1_024
_A = 24
_A = 16
elif checkpoint == "medium":
_A = 1_536
_A = 48
_A = 24
elif checkpoint == "large":
_A = 2_048
_A = 48
_A = 32
else:
raise ValueError(F'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
_A = MusicgenDecoderConfig(
hidden_size=_snake_case , ffn_dim=hidden_size * 4 , num_hidden_layers=_snake_case , num_attention_heads=_snake_case , )
return config
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] , _snake_case :int=None , _snake_case :int=None , _snake_case :Optional[int]="cpu" ) -> List[str]:
_A = MusicGen.get_pretrained(_snake_case , device=_snake_case )
_A = decoder_config_from_checkpoint(_snake_case )
_A = fairseq_model.lm.state_dict()
_A , _A = rename_state_dict(
_snake_case , hidden_size=decoder_config.hidden_size )
_A = TaEncoderModel.from_pretrained('''t5-base''' )
_A = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
_A = MusicgenForCausalLM(_snake_case ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_A , _A = decoder.load_state_dict(_snake_case , strict=_snake_case )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_snake_case )
if len(_snake_case ) > 0:
raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
if len(_snake_case ) > 0:
raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
_A = MusicgenForConditionalGeneration(text_encoder=_snake_case , audio_encoder=_snake_case , decoder=_snake_case )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_snake_case )
# check we can do a forward pass
_A = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_A = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_A = model(input_ids=_snake_case , decoder_input_ids=_snake_case ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
_A = AutoTokenizer.from_pretrained('''t5-base''' )
_A = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
_A = MusicgenProcessor(feature_extractor=_snake_case , tokenizer=_snake_case )
# set the appropriate bos/pad token ids
_A = 2_048
_A = 2_048
# set other default generation config params
_A = int(30 * audio_encoder.config.frame_rate )
_A = True
_A = 3.0
if pytorch_dump_folder is not None:
Path(_snake_case ).mkdir(exist_ok=_snake_case )
logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if repo_id:
logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(_snake_case )
processor.push_to_hub(_snake_case )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
UpperCAmelCase_ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
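# Illustrative usage sketch (added, not part of the original script): a typical invocation,
# assuming the script is saved as convert_musicgen.py (hypothetical filename) and that the
# audiocraft and transformers dependencies are installed.
#
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small --device cpu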
| 2
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort( sequence :list ) -> list:
if not sequence:
return []
if len(sequence ) == 1:
return list(sequence )
low = 0
high = len(sequence ) - 1
mid = 0
while mid <= high:
if sequence[mid] == colors[0]:
sequence[low], sequence[mid] = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
sequence[mid], sequence[high] = sequence[high], sequence[mid]
high -= 1
else:
msg = F'''The elements inside the sequence must contain only {colors} values'''
raise ValueError(msg )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input("""Enter numbers separated by commas:\n""").strip()
unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(f'{dutch_national_flag_sort(unsorted)}')
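# Illustrative addition (not part of the original file): a quick check of the sorter defined
# above on a hand-written sequence of the three allowed colour values.
example = [2, 0, 1, 2, 1, 0, 0, 2]
print(dutch_national_flag_sort(example))  # [0, 0, 0, 1, 1, 2, 2, 2]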
| 2
| 1
|
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset( IterableDataset ):
def __init__( self , p_stop=0.01 , max_length=1000 ):
self.p_stop = p_stop
self.max_length = max_length
def __iter__( self ):
count = 0
stop = False
while not stop and count < self.max_length:
yield count
count += 1
stop = random.random() < self.p_stop
class snake_case__ ( unittest.TestCase ):
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=True ):
__a = [
BatchSamplerShard(__UpperCamelCase , 2 , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
for i in range(2 )
]
__a = [list(__UpperCamelCase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__UpperCamelCase ) for shard in batch_sampler_shards] , [len(__UpperCamelCase ) for e in expected] )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def a__ ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
__a = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__a = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__a = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__a = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but still contains a
# whole number of num_processes batches.
__a = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__a = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is neither a round multiple of batch size nor contains a
# whole number of num_processes batches.
__a = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__a = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
# Check the shards when the dataset is very small.
__a = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
__a = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase )
def a__ ( self ):
# Check the shards when the dataset is a round multiple of batch size.
__a = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
__a = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__a = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
__a = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__a = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
__a = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
__a = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
__a = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase )
def a__ ( self ):
# Check the shards when the dataset is a round multiple of total batch size.
__a = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__a = BatchSampler(range(24 ) , batch_size=3 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__a = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__a = BatchSampler(range(21 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size but still contains a
# whole number of num_processes batches.
__a = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__a = BatchSampler(range(22 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is neither a round multiple of batch size nor contains a
# whole number of num_processes batches.
__a = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__a = BatchSampler(range(20 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
__a = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
__a = BatchSampler(range(2 ) , batch_size=3 , drop_last=__UpperCamelCase )
__a = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , even_batches=__UpperCamelCase )
def a__ ( self ):
# Check the shards when the dataset is a round multiple of batch size.
__a = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
__a = BatchSampler(range(24 ) , batch_size=4 , drop_last=__UpperCamelCase )
# Expected shouldn't change
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size.
__a = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
__a = BatchSampler(range(22 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__a = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
__a = BatchSampler(range(21 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
# Check the shards when the dataset is very small.
__a = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [[[0, 1]], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
__a = BatchSampler(range(2 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = [[], []]
self.check_batch_sampler_shards(__UpperCamelCase , __UpperCamelCase , split_batches=__UpperCamelCase , even_batches=__UpperCamelCase )
def a__ ( self ):
__a = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__a = [BatchSamplerShard(__UpperCamelCase , 2 , __UpperCamelCase , even_batches=__UpperCamelCase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=2 , lowerCamelCase=False ):
random.seed(__UpperCamelCase )
__a = list(__UpperCamelCase )
__a = [
IterableDatasetShard(
__UpperCamelCase , batch_size=__UpperCamelCase , drop_last=__UpperCamelCase , num_processes=__UpperCamelCase , process_index=__UpperCamelCase , split_batches=__UpperCamelCase , )
for i in range(__UpperCamelCase )
]
__a = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__UpperCamelCase )
iterable_dataset_lists.append(list(__UpperCamelCase ) )
__a = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__a = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
self.assertTrue(len(__UpperCamelCase ) % shard_batch_size == 0 )
__a = []
for idx in range(0 , len(__UpperCamelCase ) , __UpperCamelCase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__UpperCamelCase ) < len(__UpperCamelCase ):
reference += reference
self.assertListEqual(__UpperCamelCase , reference[: len(__UpperCamelCase )] )
def a__ ( self ):
__a = 42
__a = RandomIterableDataset()
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
# Edge case with a very small dataset
__a = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
self.check_iterable_dataset_shards(__UpperCamelCase , __UpperCamelCase , batch_size=4 , drop_last=__UpperCamelCase , split_batches=__UpperCamelCase )
def a__ ( self ):
__a = BatchSampler(range(16 ) , batch_size=4 , drop_last=__UpperCamelCase )
__a = SkipBatchSampler(__UpperCamelCase , 2 )
self.assertListEqual(list(__UpperCamelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a__ ( self ):
__a = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a__ ( self ):
__a = DataLoader(list(range(16 ) ) , batch_size=4 )
__a = skip_first_batches(__UpperCamelCase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a__ ( self ):
__a = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(__UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a__ ( self ):
Accelerator()
__a = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(__UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(__UpperCamelCase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
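# Illustrative usage sketch (added, not part of the original tests): resuming iteration
# mid-epoch with accelerate's skip_first_batches, mirroring the behaviour exercised above.
# It reuses the DataLoader and skip_first_batches imports from the top of this file.
loader = DataLoader(list(range(16)), batch_size=4)
resumed = skip_first_batches(loader, num_batches=2)
print([batch.tolist() for batch in resumed])  # [[8, 9, 10, 11], [12, 13, 14, 15]]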
| 706
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UMTaModelTester :
def __init__( self , lowerCamelCase , lowerCamelCase=99 , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=9 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase=8 , lowerCamelCase=0.1 , lowerCamelCase=0.002 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=0 , lowerCamelCase=None , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = encoder_seq_length
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = d_ff
__a = relative_attention_num_buckets
__a = dropout_rate
__a = initializer_factor
__a = eos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = None
__a = decoder_layers
def a__ ( self ):
return TaConfig.from_pretrained("google/umt5-base" )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , ):
if attention_mask is None:
__a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase )
if decoder_head_mask is None:
__a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
if cross_attn_head_mask is None:
__a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
__a = input_ids.clamp(self.pad_token_id + 1 )
__a = decoder_input_ids.clamp(self.pad_token_id + 1 )
__a = self.get_config()
__a = config.num_attention_heads
__a = self.prepare_inputs_dict(lowerCamelCase , lowerCamelCase , lowerCamelCase )
return config, input_dict
def a__ ( self ):
__a , __a = self.prepare_config_and_inputs()
return config, inputs_dict
def a__ ( self ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase , attention_mask=lowerCamelCase , decoder_attention_mask=lowerCamelCase , )
__a = model(input_ids=lowerCamelCase , decoder_input_ids=lowerCamelCase )
__a = result.last_hidden_state
__a = result.past_key_values
__a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).get_decoder().to(lowerCamelCase ).eval()
# first forward pass
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
__a = model(lowerCamelCase )
__a = model(lowerCamelCase , use_cache=lowerCamelCase )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) )
self.parent.assertTrue(len(lowerCamelCase ) == len(lowerCamelCase ) + 1 )
__a , __a = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = model(lowerCamelCase )["last_hidden_state"]
__a = model(lowerCamelCase , past_key_values=lowerCamelCase )["last_hidden_state"]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , ):
__a = UMTaModel(config=lowerCamelCase ).to(lowerCamelCase ).half().eval()
__a = model(**lowerCamelCase )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(lowerCamelCase ).any().item() )
@require_torch
class snake_case__ ( ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase ):
_snake_case : Union[str, Any] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_snake_case : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_snake_case : Optional[int] = (
{
"""conversational""": UMTaForConditionalGeneration,
"""feature-extraction""": UMTaModel,
"""summarization""": UMTaForConditionalGeneration,
"""text2text-generation""": UMTaForConditionalGeneration,
"""translation""": UMTaForConditionalGeneration,
"""question-answering""": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_snake_case : List[Any] = True
_snake_case : Union[str, Any] = False
_snake_case : Union[str, Any] = False
_snake_case : Tuple = True
_snake_case : List[str] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_snake_case : Optional[Any] = [0.8, 0.9]
def a__ ( self ):
__a = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
__a = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=lowerCamelCase , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase )
def a__ ( self ):
__a = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
__a = self.model_tester.prepare_config_and_inputs()
__a = config_and_inputs[0]
__a = UMTaForConditionalGeneration(lowerCamelCase ).eval()
model.to(lowerCamelCase )
__a = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase ),
}
for attn_name, (name, mask) in zip(lowerCamelCase , head_masking.items() ):
__a = {name: mask}
# Explicitly pass decoder_head_mask, as the T5 model requires it when head_mask is specified
if name == "head_mask":
__a = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase )
__a = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase , return_dict_in_generate=lowerCamelCase , **lowerCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def a__ ( self ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case__ ( unittest.TestCase ):
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def a__ ( self ):
__a = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=lowerCamelCase ).to(lowerCamelCase )
__a = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=lowerCamelCase , legacy=lowerCamelCase )
__a = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
__a = tokenizer(lowerCamelCase , return_tensors="pt" , padding=lowerCamelCase ).input_ids
# fmt: off
__a = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase , lowerCamelCase )
__a = model.generate(input_ids.to(lowerCamelCase ) )
__a = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
__a = tokenizer.batch_decode(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
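# Illustrative usage sketch (added, not part of the original tests): a minimal generation call
# against the checkpoint exercised above, written with the public transformers class name
# (UMT5ForConditionalGeneration). Downloading "google/umt5-small" requires network access.
from transformers import AutoTokenizer, UMT5ForConditionalGeneration
umt5_tokenizer = AutoTokenizer.from_pretrained("google/umt5-small")
umt5_model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
umt5_inputs = umt5_tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt")
umt5_outputs = umt5_model.generate(**umt5_inputs, max_new_tokens=10)
print(umt5_tokenizer.batch_decode(umt5_outputs, skip_special_tokens=True))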
| 67
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCAmelCase__ ( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
def __init__( self , embedding_dim : int = 768 ):
super().__init__()
self.mean = nn.Parameter(torch.zeros(1 , embedding_dim ) )
self.std = nn.Parameter(torch.ones(1 , embedding_dim ) )
def to( self , torch_device : Optional[Union[str, torch.device]] = None , torch_dtype : Optional[torch.dtype] = None ):
self.mean = nn.Parameter(self.mean.to(torch_device ).to(torch_dtype ) )
self.std = nn.Parameter(self.std.to(torch_device ).to(torch_dtype ) )
return self
def scale( self , embeds ):
embeds = (embeds - self.mean) * 1.0 / self.std
return embeds
def unscale( self , embeds ):
embeds = (embeds * self.std) + self.mean
return embeds
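# Illustrative addition (not part of the original file): the scale/unscale pair above is a plain
# standardisation and its inverse, so it should round-trip an embedding tensor. The width (768)
# matches the default embedding dimension above; the mean and std values here are random.
embeds = torch.randn(2, 768)
mean, std = torch.randn(1, 768), torch.rand(1, 768) + 0.5
scaled = (embeds - mean) * 1.0 / std
restored = (scaled * std) + mean
print(torch.allclose(restored, embeds, atol=1e-5))  # True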
| 493
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase__ :
"""simple docstring"""
@staticmethod
def lowercase_ ( *__lowerCamelCase : int , **__lowerCamelCase : List[Any] ) -> List[str]:
pass
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
a = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model=__lowerCamelCase , tokenizer=__lowerCamelCase , image_processor=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , '''''' ) ) )
SCREAMING_SNAKE_CASE__ = '''What is the placebo?'''
SCREAMING_SNAKE_CASE__ = [
{
'''image''': load_image(__lowerCamelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowercase_ ( self : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = dqa_pipeline(__lowerCamelCase , top_k=2 )
self.assertEqual(
__lowerCamelCase , [
[
{'''score''': ANY(__lowerCamelCase ), '''answer''': ANY(__lowerCamelCase ), '''start''': ANY(__lowerCamelCase ), '''end''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''answer''': ANY(__lowerCamelCase ), '''start''': ANY(__lowerCamelCase ), '''end''': ANY(__lowerCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowercase_ ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''How many cats are there?'''
SCREAMING_SNAKE_CASE__ = [
{'''score''': 0.0001, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0001, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , __lowerCamelCase )
# No text is detected in this image, so layoutlmv2 should fail,
# most likely returning an empty answer.
SCREAMING_SNAKE_CASE__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
# We can optionally pass the words and bounding boxes directly
SCREAMING_SNAKE_CASE__ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , words=__lowerCamelCase , boxes=__lowerCamelCase , top_k=2 )
self.assertEqual(__lowerCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase_ ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9944, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0009, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase_ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9974, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9948, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase_ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__lowerCamelCase , revision='''3dc6de3''' , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
SCREAMING_SNAKE_CASE__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.4251, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0819, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase_ ( self : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__lowerCamelCase , revision='''3dc6de3''' , max_seq_len=50 , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
SCREAMING_SNAKE_CASE__ = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
[
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
SCREAMING_SNAKE_CASE__ = list(zip(*apply_tesseract(load_image(__lowerCamelCase ) , __lowerCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
SCREAMING_SNAKE_CASE__ = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=4 ) , [
{'''score''': 0.9999, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9998, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def lowercase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
SCREAMING_SNAKE_CASE__ = INVOICE_URL
SCREAMING_SNAKE_CASE__ = '''What is the invoice number?'''
SCREAMING_SNAKE_CASE__ = dqa_pipeline(image=__lowerCamelCase , question=__lowerCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__lowerCamelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowercase_ ( self : Union[str, Any] ) -> Optional[Any]:
pass
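# Illustrative usage sketch (added, not part of the original tests): a minimal
# document-question-answering call mirroring the slow tests above. It needs network access,
# Pillow and pytesseract, and uses the same pinned invoice image as the tests.
from transformers import pipeline
dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
invoice_image = "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
print(dqa(image=invoice_image, question="What is the invoice number?", top_k=1))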
| 493
| 1
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_0_0_0_3
PYTHON_CODE = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase( TokenizerTesterMixin , unittest.TestCase ):
__A: List[str] = PLBartTokenizer
__A: Any = None
__A: Dict = False
def a__ ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase : List[Any] = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : int ):
_UpperCAmelCase : Optional[Any] = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a )
_UpperCAmelCase : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
_UpperCAmelCase : List[Any] = tokenizer.vocab_size
_UpperCAmelCase : str = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 4 , __a )]
self.assertListEqual(__a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
_UpperCAmelCase : Optional[Any] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
_UpperCAmelCase : Optional[int] = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
def a__ ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = PLBartTokenizer(__a , language_codes="multi" , keep_accents=__a )
_UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
_UpperCAmelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
_UpperCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_UpperCAmelCase : str = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
_UpperCAmelCase : Tuple = tokenizer.vocab_size
_UpperCAmelCase : int = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 7 , __a )]
self.assertListEqual(
__a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
_UpperCAmelCase : Tuple = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
_UpperCAmelCase : List[Any] = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase( unittest.TestCase ):
__A: Optional[int] = '''uclanlp/plbart-python-en_XX'''
__A: str = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
__A: Dict = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
__A: Tuple = [
1_34,
54_52,
3_34_60,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
9_88,
20,
3_34_56,
19,
3_34_56,
7_71,
39,
42_58,
8_89,
33_18,
3_34_41,
3_34_63,
3_34_65,
3_34_63,
3_34_49,
24_71,
2,
PYTHON_CODE,
]
@classmethod
def a__ ( cls : Any ):
_UpperCAmelCase : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
_UpperCAmelCase : int = 1
return cls
def a__ ( self : Tuple ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_00_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_00_03 )
def a__ ( self : List[str] ):
_UpperCAmelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def a__ ( self : Optional[int] ):
self.assertIn(__a , self.tokenizer.all_special_ids )
_UpperCAmelCase : List[str] = [EN_CODE, 90_37, 3_34_42, 57, 7_52, 1_53, 14, 56, 18, 9, 2]
_UpperCAmelCase : int = self.tokenizer.decode(__a , skip_special_tokens=__a )
_UpperCAmelCase : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def a__ ( self : Union[str, Any] ):
_UpperCAmelCase : List[Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
self.assertIsInstance(src_text[0] , __a )
_UpperCAmelCase : Dict = 10
_UpperCAmelCase : str = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __a )
self.assertEqual(len(__a ) , __a )
def a__ ( self : Optional[Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_00_04, 5_00_01] )
def a__ ( self : int ):
_UpperCAmelCase : Optional[Any] = tempfile.mkdtemp()
_UpperCAmelCase : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
_UpperCAmelCase : str = PLBartTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def a__ ( self : Optional[Any] ):
_UpperCAmelCase : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" )
_UpperCAmelCase : int = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
    @require_torch
    def test_python_en_tokenizer_prepare_batch( self ):
        batch = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=True , truncation=True , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , result )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] )  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
    def test_seq2seq_max_length( self ):
        batch = self.tokenizer(self.src_text , padding=True , truncation=True , max_length=3 , return_tensors="pt" )
        targets = self.tokenizer(
            text_target=self.tgt_text , padding=True , truncation=True , max_length=10 , return_tensors="pt" )
        labels = targets["""input_ids"""]
        batch["decoder_input_ids"] = shift_tokens_right(labels , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
    @require_torch
    def test_tokenizer_translation( self ):
        inputs = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
        self.assertEqual(
            nested_simplify(inputs ) , {
                # A, test, EOS, en_XX
                "input_ids": [[1_50, 2_42, 2, 5_00_03]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 5_00_01,
            } , )
| 720
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase( ProcessorMixin ):
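    # Wraps a CLIP image processor and a CLIP tokenizer into a single processor object.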
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 328
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
a : List[Any] = logging.get_logger(__name__)
a : Optional[Any] = {
'''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
__SCREAMING_SNAKE_CASE = """layoutlmv3"""
    def __init__( self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1_024 , coordinate_size=128 , shape_size=128 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=128 , rel_2d_pos_bins=64 , max_rel_2d_pos=256 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=224 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE__ ( OnnxConfig ):
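    # ONNX export configuration: declares input names and dynamic axes, and builds dummy inputs for tracing.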
    torch_onnx_minimum_version = version.parse("""1.12""" )
@property
    def inputs( self ):
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
else:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("bbox", {0: "batch", 1: "sequence"}),
("attention_mask", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels"}),
] )
@property
    def atol_for_validation( self ):
"""simple docstring"""
return 1e-5
@property
    def default_onnx_opset( self ):
"""simple docstring"""
return 12
    def generate_dummy_inputs( self , processor: "ProcessorMixin" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional["TensorType"] = None , num_channels: int = 3 , image_width: int = 40 , image_height: int = 40 , ):
        """simple docstring"""
        setattr(processor.image_processor , "apply_ocr" , False )
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )
        return inputs
| 69
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 477
| 0
|
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
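# Maps the --lr_scheduler argument to the corresponding learning-rate schedule factory.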
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
'''constant''': get_constant_schedule,
'''constant_w_warmup''': get_constant_schedule_with_warmup,
}
class lowerCamelCase__ ( Trainer ):
    def __init__( self , config=None , data_args=None , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        if config is None:
            assert isinstance(self.model , PreTrainedModel ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f' {self.model.__class__}'
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
                ' padding..' )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler( self , num_training_steps ):
        if self.optimizer is None:
            no_decay = ['bias', 'LayerNorm.weight']
            optimizer_grouped_parameters = [
                {
                    'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    'weight_decay': self.args.weight_decay,
                },
                {
                    'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    'weight_decay': 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    'betas': (self.args.adam_beta1, self.args.adam_beta2),
                    'eps': self.args.adam_epsilon,
                }
            optimizer_kwargs['lr'] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
        else:  # ignoring --lr_scheduler
            logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
    def _get_lr_scheduler( self , num_training_steps ):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            scheduler = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
        return scheduler
    def _get_train_sampler( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss( self , model , inputs , labels ):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs , use_cache=False )[0]
                loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs , use_cache=False )[0]
            lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
            loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def compute_loss( self , model , inputs ):
        labels = inputs.pop('labels' )
        loss , _ = self._compute_loss(model , inputs , labels )
        return loss
    def prediction_step( self , model: nn.Module , inputs: Dict[str, Union[torch.Tensor, Any]] , prediction_loss_only: bool , ignore_keys: Optional[List[str]] = None , ):
        inputs = self._prepare_inputs(inputs )
        gen_kwargs = {
            'max_length': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **gen_kwargs , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs['max_length'] )
        labels = inputs.pop('labels' )
        with torch.no_grad():
            # compute loss on predict data
            loss , logits = self._compute_loss(model , inputs , labels )
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels , gen_kwargs['max_length'] )
        return (loss, logits, labels)
    def _pad_tensors_to_max_len( self , tensor , max_length ):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
                f' padded to `max_length`={max_length}' )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 225
|
from __future__ import annotations
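# Rat-in-a-maze solver: depth-first search that records the walked path in a parallel solutions grid.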
def solution(maze: list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('\n'.join(str(row ) for row in solutions ) )
    else:
        print('No solution exists!' )
    return solved
def run_maze(maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 225
| 1
|
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
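# These tests require a local checkout of the Tatoeba-Challenge repository at DEFAULT_REPO.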
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class lowerCamelCase__ ( unittest.TestCase ):
    @cached_property
    def resolver( self ):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
    @slow
    def test_resolver( self ):
        self.resolver.convert_models(["""heb-eng"""] )
    @slow
    def test_model_card( self ):
        content , mmeta = self.resolver.write_model_card("""opus-mt-he-en""" ,dry_run=True )
        assert mmeta["long_pair"] == "heb-eng"
| 341
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,A=13 ,A=7 ,A=True ,A=True ,A=True ,A=99 ,A=32 ,A=5 ,A=4 ,A=37 ,A="gelu" ,A=0.1 ,A=0.1 ,A=512 ,A=16 ,A=2 ,A=0.02 ,A=3 ,A=4 ,A=None ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = self.vocab_size - 1
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
UpperCAmelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = OpenAIGPTModel(config=A )
model.to(A )
model.eval()
UpperCAmelCase = model(A ,token_type_ids=A ,head_mask=A )
UpperCAmelCase = model(A ,token_type_ids=A )
UpperCAmelCase = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = OpenAIGPTLMHeadModel(A )
model.to(A )
model.eval()
UpperCAmelCase = model(A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = OpenAIGPTDoubleHeadsModel(A )
model.to(A )
model.eval()
UpperCAmelCase = model(A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,*A ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = OpenAIGPTForSequenceClassification(A )
model.to(A )
model.eval()
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = model(A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
{
'''feature-extraction''': OpenAIGPTModel,
'''text-classification''': OpenAIGPTForSequenceClassification,
'''text-generation''': OpenAIGPTLMHeadModel,
'''zero-shot''': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _UpperCamelCase ( self ,A ,A ,A=False ):
UpperCAmelCase = super()._prepare_for_class(A ,A ,return_labels=A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=A ,)
UpperCAmelCase = inputs_dict["""labels"""]
UpperCAmelCase = inputs_dict["""labels"""]
UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=A ,)
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A )
return inputs_dict
def _UpperCamelCase ( self ):
UpperCAmelCase = OpenAIGPTModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,n_embd=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A )
@slow
def _UpperCamelCase ( self ):
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = OpenAIGPTModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(A )
UpperCAmelCase = torch.tensor([[481, 4_735, 544]] ,dtype=torch.long ,device=A ) # the president is
UpperCAmelCase = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCAmelCase = model.generate(A ,do_sample=A )
self.assertListEqual(output_ids[0].tolist() ,A )
| 341
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"squeezebert/squeezebert-uncased": 5_12,
"squeezebert/squeezebert-mnli": 5_12,
"squeezebert/squeezebert-mnli-headless": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class lowerCAmelCase_ ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 712
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
def __a ( self : int ):
'''simple docstring'''
__a = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
__a = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}] )
__a = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
__a = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
# Legacy behavior
__a = text_classifier("""This is great !""" , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
__a = text_classifier("""This is great !""" , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}]] )
__a = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
__a = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
] , )
@require_torch
def __a ( self : Optional[int] ):
'''simple docstring'''
import torch
__a = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@require_tf
def __a ( self : List[str] ):
'''simple docstring'''
__a = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@slow
@require_torch
def __a ( self : int ):
'''simple docstring'''
__a = pipeline("""text-classification""" )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
@slow
@require_tf
def __a ( self : Union[str, Any] ):
'''simple docstring'''
__a = pipeline("""text-classification""" , framework="""tf""" )
__a = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
def __a ( self : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a = TextClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def __a ( self : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
__a = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__a = """HuggingFace is in"""
__a = text_classifier(SCREAMING_SNAKE_CASE__ )
self.assertEqual(nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
__a = ["""HuggingFace is in """, """Paris is in France"""]
__a = text_classifier(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}, {"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__a = text_classifier(SCREAMING_SNAKE_CASE__ , top_k=SCREAMING_SNAKE_CASE__ )
__a = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [[{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] * N, [{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] * N] , )
__a = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
__a = text_classifier(SCREAMING_SNAKE_CASE__ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , {"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__a = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
text_classifier(SCREAMING_SNAKE_CASE__ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__a = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ) , [{"""label""": ANY(SCREAMING_SNAKE_CASE__ ), """score""": ANY(SCREAMING_SNAKE_CASE__ )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 201
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
        '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''XGLMForCausalLM''',
        '''XGLMModel''',
        '''XGLMPreTrainedModel''',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
        '''FlaxXGLMForCausalLM''',
        '''FlaxXGLMModel''',
        '''FlaxXGLMPreTrainedModel''',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
        '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFXGLMForCausalLM''',
        '''TFXGLMModel''',
        '''TFXGLMPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 91
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
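# Each iteration replaces every line segment of the curve with four shorter segments, building the Koch snowflake.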
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray] , steps: int ) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps ):
        vectors = iteration_step(vectors )
    return vectors
def iteration_step(vectors: list[numpy.ndarray] ) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1] ):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector )
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3 )
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
        new_vectors.append(start_vector + difference_vector * 2 / 3 )
    new_vectors.append(vectors[-1] )
    return new_vectors
def rotate(vector: numpy.ndarray , angle_in_degrees: float ) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees )
    c , s = numpy.cos(theta ), numpy.sin(theta )
    rotation_matrix = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def plot(vectors: list[numpy.ndarray] ) -> None:
    axes = plt.gca()
    axes.set_aspect('''equal''' )
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates , y_coordinates = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 141
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __lowerCAmelCase ( PipelineTool ):
'''simple docstring'''
a_ = """microsoft/speecht5_tts"""
a_ = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
a_ = """text_reader"""
a_ = SpeechTaProcessor
a_ = SpeechTaForTextToSpeech
a_ = SpeechTaHifiGan
a_ = ["""text"""]
a_ = ["""audio"""]
    def setup( self ):
        '''simple docstring'''
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()
    def encode( self ,text ,speaker_embeddings=None ):
        '''simple docstring'''
        inputs = self.pre_processor(text=text ,return_tensors="""pt""" ,truncation=True )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""" ,split="""validation""" )
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
    def forward( self ,inputs ):
        '''simple docstring'''
        with torch.no_grad():
            return self.model.generate_speech(**inputs )
    def decode( self ,outputs ):
        '''simple docstring'''
        with torch.no_grad():
            return self.post_processor(outputs ).cpu().detach()
| 27
|
'''simple docstring'''
from __future__ import annotations
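# Iterative depth-first search over a graph given as an adjacency list (dict of node -> neighbours).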
def depth_first_search(graph: dict , start: str ) -> set:
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27
| 1
|
snake_case = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
| 62
|
from typing import Any
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a ) -> Any:
__SCREAMING_SNAKE_CASE = data
__SCREAMING_SNAKE_CASE = None
def __repr__( self ) -> str:
return f'''Node({self.data})'''
class __SCREAMING_SNAKE_CASE :
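    # Singly linked list with index-based get/set, positional insertion and deletion, and in-place reversal.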
def __init__( self ) -> Tuple:
__SCREAMING_SNAKE_CASE = None
def __iter__( self ) -> Any:
__SCREAMING_SNAKE_CASE = self.head
while node:
yield node.data
__SCREAMING_SNAKE_CASE = node.next
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join([str(_a ) for item in self] )
    def __getitem__( self, index ) -> Any:
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
    def __setitem__( self, index, data ) -> None:
        if not 0 <= index < len(self ):
            raise ValueError("list index out of range." )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(len(self ), _a )
def __lowerCAmelCase ( self, _a ) -> None:
self.insert_nth(0, _a )
def __lowerCAmelCase ( self, _a, _a ) -> None:
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
__SCREAMING_SNAKE_CASE = Node(_a )
if self.head is None:
__SCREAMING_SNAKE_CASE = new_node
elif index == 0:
__SCREAMING_SNAKE_CASE = self.head # link new_node to head
__SCREAMING_SNAKE_CASE = new_node
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = new_node
def __lowerCAmelCase ( self ) -> None: # print every node data
print(self )
def __lowerCAmelCase ( self ) -> Any:
return self.delete_nth(0 )
def __lowerCAmelCase ( self ) -> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowerCAmelCase ( self, _a = 0 ) -> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
__SCREAMING_SNAKE_CASE = self.head # default first node
if index == 0:
__SCREAMING_SNAKE_CASE = self.head.next
else:
__SCREAMING_SNAKE_CASE = self.head
for _ in range(index - 1 ):
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next
__SCREAMING_SNAKE_CASE = temp.next.next
return delete_node.data
def __lowerCAmelCase ( self ) -> bool:
return self.head is None
def __lowerCAmelCase ( self ) -> None:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = self.head
while current:
# Store the current node's next node.
__SCREAMING_SNAKE_CASE = current.next
# Make the current node's next point backwards
__SCREAMING_SNAKE_CASE = prev
# Make the previous node be the current node
__SCREAMING_SNAKE_CASE = current
# Make the current node the next node (to progress iteration)
__SCREAMING_SNAKE_CASE = next_node
# Return prev in order to put the head at the end
__SCREAMING_SNAKE_CASE = prev
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = LinkedList()
assert linked_list.is_empty() is True
assert str(__snake_case ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(__snake_case ) == i
linked_list.insert_nth(__snake_case , i + 1 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(__snake_case ) == 9
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__SCREAMING_SNAKE_CASE = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__snake_case ) == "->".join(str(__snake_case ) for i in range(-8 , 1 ) )
def _A ( ) -> None:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [
-9,
100,
Node(7734_5112 ),
"dlrow olleH",
7,
5555,
0,
-1_9_2.5_5_5_5_5,
"Hello, world!",
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
__SCREAMING_SNAKE_CASE = LinkedList()
for i in test_input:
linked_list.insert_tail(__snake_case )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__SCREAMING_SNAKE_CASE = linked_list.delete_head()
assert result == -9
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__SCREAMING_SNAKE_CASE = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__SCREAMING_SNAKE_CASE = linked_list.delete_nth(10 )
assert result is None
assert (
str(__snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__snake_case )
assert (
str(__snake_case )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__snake_case )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _A ( ) -> Union[str, Any]:
"""simple docstring"""
from doctest import testmod
testmod()
__SCREAMING_SNAKE_CASE = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(__snake_case )
print("\nReading/changing Node data using indexing:" )
print(f'''Element at Position 1: {linked_list[1]}''' )
__SCREAMING_SNAKE_CASE = input("Enter New Value: " ).strip()
print("New list:" )
print(__snake_case )
print(f'''length of linked_list is : {len(__snake_case )}''' )
if __name__ == "__main__":
main()
| 693
| 0
|
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
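    # decode_best_spans (below): given the tokenized reader input and the reader's raw logits,
    # rank passages by relevance and extract the highest-scoring answer spans from each passage.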
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
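    # _get_best_spans (below): score every (start, end) candidate within max_answer_length by the
    # sum of its start and end logits, then greedily keep the top non-overlapping spans.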
    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
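

# Hedged usage sketch, not part of the original module: it pairs the reader tokenizer
# defined above with a DPRReader model from transformers. The checkpoint name and the
# question/title/text strings are illustrative assumptions only.
def _example_reader_usage():
    from transformers import DPRReader

    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
    encoded_inputs = tokenizer(
        questions=["What is love?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )
    outputs = model(**encoded_inputs)
    # decode_best_spans ranks passages by relevance and returns DPRSpanPrediction tuples
    best_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
    return best_spans[0].text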
| 701
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
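# The queue below is a fixed-capacity circular queue built on a ring of pre-allocated,
# doubly linked nodes: enqueue advances the rear pointer and overwrites a node's data,
# dequeue reads and clears the data at the front pointer, so no nodes are allocated
# after construction.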
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")
class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 74
| 0
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self : Any ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : int=1_3 ,__SCREAMING_SNAKE_CASE : List[Any]=7 ,__SCREAMING_SNAKE_CASE : Optional[Any]=True ,__SCREAMING_SNAKE_CASE : int=True ,__SCREAMING_SNAKE_CASE : Optional[Any]=False ,__SCREAMING_SNAKE_CASE : Union[str, Any]=True ,__SCREAMING_SNAKE_CASE : List[str]=9_9 ,__SCREAMING_SNAKE_CASE : int=3_2 ,__SCREAMING_SNAKE_CASE : Dict=5 ,__SCREAMING_SNAKE_CASE : Tuple=4 ,__SCREAMING_SNAKE_CASE : Optional[Any]=3_7 ,__SCREAMING_SNAKE_CASE : Optional[Any]="gelu" ,__SCREAMING_SNAKE_CASE : Any=0.1 ,__SCREAMING_SNAKE_CASE : str=0.1 ,__SCREAMING_SNAKE_CASE : Optional[Any]=5_1_2 ,__SCREAMING_SNAKE_CASE : int=1_6 ,__SCREAMING_SNAKE_CASE : Tuple=2 ,__SCREAMING_SNAKE_CASE : List[Any]=0.02 ,__SCREAMING_SNAKE_CASE : Optional[int]=3 ,__SCREAMING_SNAKE_CASE : Dict=4 ,__SCREAMING_SNAKE_CASE : str=None ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self : List[str] ):
return BioGptConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase ( self : str ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCAmelCase = BioGptModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Any ,):
UpperCAmelCase = BioGptForCausalLM(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self : int ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Dict ,*__SCREAMING_SNAKE_CASE : Optional[int] ):
UpperCAmelCase = BioGptModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
# create attention mask
UpperCAmelCase = torch.ones(input_ids.shape ,dtype=torch.long ,device=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = self.seq_length // 2
UpperCAmelCase = 0
# first forward pass
UpperCAmelCase , UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ).to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# change a random masked slice from input_ids
UpperCAmelCase = ids_tensor((1,) ,__SCREAMING_SNAKE_CASE ).item() + 1
UpperCAmelCase = ids_tensor((self.batch_size, 1) ,config.vocab_size ).squeeze(-1 )
UpperCAmelCase = random_other_next_tokens
# append to next input_ids and attn_mask
UpperCAmelCase = torch.cat([input_ids, next_tokens] ,dim=-1 )
UpperCAmelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) ,dtype=torch.long ,device=__SCREAMING_SNAKE_CASE )] ,dim=1 ,)
# get two different outputs
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )["last_hidden_state"]
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,past_key_values=__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )["last_hidden_state"]
# select random slice
UpperCAmelCase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Optional[int] ,*__SCREAMING_SNAKE_CASE : Optional[int] ):
UpperCAmelCase = BioGptModel(config=__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase = torch.ones(input_ids.shape ,dtype=torch.long ,device=__SCREAMING_SNAKE_CASE )
# first forward pass
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,use_cache=__SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 3) ,config.vocab_size )
UpperCAmelCase = ids_tensor((self.batch_size, 3) ,2 )
# append to next input_ids and
UpperCAmelCase = torch.cat([input_ids, next_tokens] ,dim=-1 )
UpperCAmelCase = torch.cat([attention_mask, next_attn_mask] ,dim=-1 )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE )["last_hidden_state"]
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,past_key_values=__SCREAMING_SNAKE_CASE )[
"last_hidden_state"
]
# select random slice
UpperCAmelCase = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
def _UpperCAmelCase ( self : Union[str, Any] ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : Any ,*__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : List[str]=False ):
UpperCAmelCase = BioGptForCausalLM(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : List[Any] ,*__SCREAMING_SNAKE_CASE : Dict ):
UpperCAmelCase = BioGptModel(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) ,0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) ,0.01 )
def _UpperCAmelCase ( self : int ,__SCREAMING_SNAKE_CASE : str ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Union[str, Any] ,__SCREAMING_SNAKE_CASE : str ,*__SCREAMING_SNAKE_CASE : Dict ):
UpperCAmelCase = self.num_labels
UpperCAmelCase = BioGptForTokenClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,token_type_ids=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __magic_name__ ( _a , _a , _a , unittest.TestCase):
_UpperCAmelCase : Union[str, Any] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
_UpperCAmelCase : int = (BioGptForCausalLM,) if is_torch_available() else ()
_UpperCAmelCase : str = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase : List[Any] = False
def _UpperCAmelCase ( self : Optional[int] ):
UpperCAmelCase = BioGptModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,hidden_size=3_7 )
def _UpperCAmelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : List[str] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Tuple ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase = type
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : int ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*__SCREAMING_SNAKE_CASE ,gradient_checkpointing=__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[Any] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : Optional[Any] ):
UpperCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
UpperCAmelCase = "left"
# Define PAD Token = EOS Token = 50256
UpperCAmelCase = tokenizer.eos_token
UpperCAmelCase = model.config.eos_token_id
# use different length sentences to test batching
UpperCAmelCase = [
"Hello, my dog is a little",
"Today, I",
]
UpperCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE ,return_tensors="pt" ,padding=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = inputs["input_ids"].to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model.generate(
input_ids=__SCREAMING_SNAKE_CASE ,attention_mask=inputs["attention_mask"].to(__SCREAMING_SNAKE_CASE ) ,)
UpperCAmelCase = tokenizer(sentences[0] ,return_tensors="pt" ).input_ids.to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model.generate(input_ids=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
UpperCAmelCase = tokenizer(sentences[1] ,return_tensors="pt" ).input_ids.to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model.generate(input_ids=__SCREAMING_SNAKE_CASE ,max_length=model.config.max_length - num_paddings )
UpperCAmelCase = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE ,skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokenizer.decode(output_padded[0] ,skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = [
"Hello, my dog is a little bit bigger than a little bit.",
"Today, I have a good idea of how to use the information",
]
self.assertListEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE ,[non_padded_sentence, padded_sentence] )
@slow
def _UpperCAmelCase ( self : Dict ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = BioGptModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : int ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = input_dict["input_ids"]
UpperCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
UpperCAmelCase = BioGptForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = "multi_label_classification"
UpperCAmelCase = input_dict["input_ids"]
UpperCAmelCase = input_ids.ne(1 ).to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase = BioGptForSequenceClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE ,attention_mask=__SCREAMING_SNAKE_CASE ,labels=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __magic_name__ ( unittest.TestCase):
@slow
def _UpperCAmelCase ( self : List[str] ):
UpperCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
UpperCAmelCase = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase = 4_2_3_8_4
UpperCAmelCase = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,__SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
@slow
def _UpperCAmelCase ( self : List[str] ):
UpperCAmelCase = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
UpperCAmelCase = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
model.to(__SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
UpperCAmelCase = tokenizer("COVID-19 is" ,return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model.generate(
**__SCREAMING_SNAKE_CASE ,min_length=1_0_0 ,max_length=1_0_2_4 ,num_beams=5 ,early_stopping=__SCREAMING_SNAKE_CASE ,)
UpperCAmelCase = tokenizer.decode(output_ids[0] ,skip_special_tokens=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = (
"COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
" more than 800,000 deaths."
)
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
| 333
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCAmelCase =imread(R"digital_image_processing/image_data/lena_small.jpg")
__lowerCAmelCase =cvtColor(img, COLOR_BGR2GRAY)
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = cn.convert_to_negative(_lowerCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def __UpperCamelCase ( ):
"""simple docstring"""
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_lowerCAmelCase , 1_10 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
UpperCAmelCase = canny.canny(_lowerCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def __UpperCamelCase ( ):
"""simple docstring"""
assert gg.gaussian_filter(_lowerCAmelCase , 5 , sigma=0.9 ).all()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
UpperCAmelCase = conv.img_convolve(_lowerCAmelCase , _lowerCAmelCase ).astype(_lowerCAmelCase )
assert res.any()
def __UpperCamelCase ( ):
"""simple docstring"""
assert med.median_filter(_lowerCAmelCase , 3 ).any()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = sob.sobel_filter(_lowerCAmelCase )
assert grad.any() and theta.any()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = sp.make_sepia(_lowerCAmelCase , 20 )
assert sepia.all()
def __UpperCamelCase ( _lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" ):
"""simple docstring"""
UpperCAmelCase = bs.Burkes(imread(_lowerCAmelCase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def __UpperCamelCase ( _lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
"""simple docstring"""
UpperCAmelCase = rs.NearestNeighbour(imread(_lowerCAmelCase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def __UpperCamelCase ( ):
"""simple docstring"""
UpperCAmelCase = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
UpperCAmelCase = imread(_lowerCAmelCase , 0 )
# Test for get_neighbors_pixel function() return not None
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = image[x_coordinate][y_coordinate]
UpperCAmelCase = lbp.get_neighbors_pixel(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
UpperCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
UpperCAmelCase = lbp.local_binary_value(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
assert lbp_image.any()
| 333
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args) -> "DownloadCommand":
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser) -> None:
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models')
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be downloaded even if already in cache-dir')
        download_parser.add_argument(
            '--trust-remote-code', action='store_true', help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine', )
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool) -> None:
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self) -> None:
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
| 705
|
'''simple docstring'''
from collections import Counter
from timeit import timeit
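# A string can be rearranged into a palindrome exactly when at most one distinct
# character occurs an odd number of times. Both implementations below check that
# invariant: one via collections.Counter, one via an explicit frequency dictionary.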
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit("z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z"),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 98
| 0
|
"""simple docstring"""
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        '''simple docstring'''
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
def __snake_case ( self , _A=None , _A=None , _A=True ) -> Any:
'''simple docstring'''
return self.tokenizer.get_decoder_prompt_ids(task=lowerCamelCase__ , language=lowerCamelCase__ , no_timestamps=lowerCamelCase__ )
def __call__( self , *_A , **_A ) -> Tuple:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs.pop("""audio""" , lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs.pop("""sampling_rate""" , lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs.pop("""text""" , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
_UpperCAmelCase : Any = args[0]
_UpperCAmelCase : List[str] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if text is not None:
_UpperCAmelCase : List[str] = self.tokenizer(lowerCamelCase__ , **lowerCamelCase__ )
if audio is not None:
_UpperCAmelCase : Any = self.feature_extractor(lowerCamelCase__ , *lowerCamelCase__ , sampling_rate=lowerCamelCase__ , **lowerCamelCase__ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
_UpperCAmelCase : Any = audio_inputs["""input_values"""]
if "padding_mask" in audio_inputs:
_UpperCAmelCase : Optional[int] = audio_inputs["""padding_mask"""]
return inputs
def __snake_case ( self , *_A , **_A ) -> Dict:
'''simple docstring'''
_UpperCAmelCase : str = kwargs.pop("""audio""" , lowerCamelCase__ )
_UpperCAmelCase : Any = kwargs.pop("""padding_mask""" , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
_UpperCAmelCase : Any = args[0]
_UpperCAmelCase : Any = args[1:]
if audio_values is not None:
return self._decode_audio(lowerCamelCase__ , padding_mask=lowerCamelCase__ )
else:
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def __snake_case ( self , *_A , **_A ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
def __snake_case ( self , _A , _A = None ) -> List[np.ndarray]:
'''simple docstring'''
_UpperCAmelCase : Any = to_numpy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = audio_values.shape
if padding_mask is None:
return list(lowerCamelCase__ )
_UpperCAmelCase : Any = to_numpy(lowerCamelCase__ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
_UpperCAmelCase : int = seq_len - padding_mask.shape[-1]
_UpperCAmelCase : int = 1 - self.feature_extractor.padding_value
_UpperCAmelCase : Tuple = np.pad(lowerCamelCase__ , ((0, 0), (0, difference)) , """constant""" , constant_values=lowerCamelCase__ )
_UpperCAmelCase : List[str] = audio_values.tolist()
for i in range(lowerCamelCase__ ):
_UpperCAmelCase : List[str] = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
_UpperCAmelCase : Optional[Any] = sliced_audio.reshape(lowerCamelCase__ , -1 )
return audio_values
| 238
|
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
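# Chudnovsky series used below:
#   1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k)
#                        / ((3k)! * (k!)**3 * 640320**(3k + 3/2))
# which rearranges to pi = 426880 * sqrt(10005) / partial_sum; each term adds roughly
# 14 correct digits, hence the ceil(precision / 14) iteration count.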
def pi(precision: int) -> str:
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    number_of_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, number_of_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
__snake_case = 50
print(F"The first {n} digits of pi is: {pi(n)}")
| 200
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16,
        decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02,
        decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True,
        layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
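

# Hedged usage sketch, not part of the original file: builds the config defined above
# with a few overridden sizes. The attribute_map above routes `hidden_size` to `d_model`;
# the specific values are illustrative assumptions only.
def _example_trocr_config() -> "TrOCRConfig":
    config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
    assert config.hidden_size == 256  # resolved through attribute_map
    return config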
| 234
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Optional[Any]=1_3 , snake_case__ : Tuple=7 , snake_case__ : Any=True , snake_case__ : Any=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : Optional[int]=6_4 , snake_case__ : Dict=5 , snake_case__ : Dict=4 , snake_case__ : Union[str, Any]=6_4 , snake_case__ : Dict="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : Any=1_6 , snake_case__ : List[str]=2 , snake_case__ : Dict=0.02 , snake_case__ : Tuple=3 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[int]=None , ) -> Optional[int]:
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
def _snake_case ( self : Tuple ) -> Union[str, Any]:
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def _snake_case ( self : List[str] ) -> str:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : Union[str, Any] ) -> int:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _snake_case ( self : str , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict ) -> Dict:
_lowerCamelCase = MPNetModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(snake_case__ , snake_case__ )
_lowerCamelCase = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Union[str, Any] ) -> List[str]:
_lowerCamelCase = MPNetForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(
snake_case__ , attention_mask=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] ) -> List[str]:
_lowerCamelCase = self.num_labels
_lowerCamelCase = MPNetForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ) -> Any:
_lowerCamelCase = self.num_choices
_lowerCamelCase = MPNetForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase = model(
snake_case__ , attention_mask=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self : str , snake_case__ : Any , snake_case__ : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : str ) -> Optional[int]:
_lowerCamelCase = self.num_labels
_lowerCamelCase = MPNetForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : int ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = True
def _snake_case ( self : Any ) -> List[Any]:
_lowerCamelCase = MPNetModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def _snake_case ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self : Tuple ) -> List[Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*snake_case__ )
def _snake_case ( self : List[Any] ) -> Optional[int]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case__ )
def _snake_case ( self : Optional[int] ) -> List[Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case__ )
def _snake_case ( self : Tuple ) -> Union[str, Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case__ )
def _snake_case ( self : Dict ) -> Dict:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : Tuple ) -> Optional[Any]:
_lowerCamelCase = MPNetModel.from_pretrained('microsoft/mpnet-base' )
_lowerCamelCase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase = model(snake_case__ )[0]
_lowerCamelCase = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
_lowerCamelCase = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
| 234
| 1
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    '''simple docstring'''

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: Optional[float] = None,
        overlap: Optional[float] = None,
        **kwargs,
    ) -> None:
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))
def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
lowerCamelCase : List[str] = True
lowerCamelCase : List[Any] = bool(
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
lowerCamelCase : Any = [np.asarray(UpperCamelCase__ , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
lowerCamelCase : Dict = np.asarray(UpperCamelCase__ , dtype=np.floataa )
elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
lowerCamelCase : Optional[Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase : Union[str, Any] = [np.asarray(UpperCamelCase__ ).T]
# verify inputs are valid
for idx, example in enumerate(UpperCamelCase__ ):
if example.ndim > 2:
raise ValueError(F'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'''Expected stereo audio but example has {example.shape[-1]} channels''' )
lowerCamelCase : Optional[Any] = None
lowerCamelCase : Optional[int] = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
lowerCamelCase : Optional[Any] = min(array.shape[0] for array in raw_audio )
lowerCamelCase : int = int(np.floor(max_length / self.chunk_stride ) )
lowerCamelCase : Optional[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
lowerCamelCase : str = max(array.shape[0] for array in raw_audio )
lowerCamelCase : Tuple = int(np.ceil(max_length / self.chunk_stride ) )
lowerCamelCase : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
lowerCamelCase : Optional[int] = "max_length"
else:
lowerCamelCase : List[Any] = input_values
# normal padding on batch
if padded_inputs is None:
lowerCamelCase : str = self.pad(
UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , padding=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
if padding:
lowerCamelCase : str = padded_inputs.pop("attention_mask" )
lowerCamelCase : int = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
lowerCamelCase : str = example[..., None]
input_values.append(example.T )
lowerCamelCase : Union[str, Any] = input_values
if return_tensors is not None:
lowerCamelCase : Tuple = padded_inputs.convert_to_tensors(UpperCamelCase__ )
return padded_inputs
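# A minimal standalone sketch (not wired into the extractor above) of how the
# chunk_length / chunk_stride properties and the padded length interact; the
# chunk duration, sampling rate, overlap and input length below are illustrative
# assumptions, not values read from a real checkpoint.
import math


def _chunking_demo(chunk_length_s=1.0, sampling_rate=24_000, overlap=0.5, num_samples=60_000):
    chunk_length = int(chunk_length_s * sampling_rate)  # samples per chunk (24000 here)
    chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # hop between chunk starts (12000 here)
    nb_step = int(math.ceil(num_samples / chunk_stride))  # the padding branch rounds the step count up
    padded_length = (nb_step - 1) * chunk_stride + chunk_length  # total length after padding to whole chunks
    return chunk_length, chunk_stride, padded_length


# _chunking_demo() returns (24000, 12000, 72000): 60000 samples are padded up to 72000.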
| 311
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[str] = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : int = """blip_text_model"""
def __init__( self , UpperCamelCase__=3_0524 , UpperCamelCase__=768 , UpperCamelCase__=768 , UpperCamelCase__=3072 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=8 , UpperCamelCase__=512 , UpperCamelCase__="gelu" , UpperCamelCase__=1e-12 , UpperCamelCase__=0.0 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=3_0522 , UpperCamelCase__=2 , UpperCamelCase__=0 , UpperCamelCase__=102 , UpperCamelCase__=True , UpperCamelCase__=True , **UpperCamelCase__ , ) -> str:
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , sep_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase : List[Any] = vocab_size
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : List[Any] = encoder_hidden_size
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : Tuple = projection_dim
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : List[Any] = num_attention_heads
lowerCamelCase : Optional[Any] = max_position_embeddings
lowerCamelCase : List[Any] = layer_norm_eps
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : str = initializer_range
lowerCamelCase : List[str] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = is_decoder
lowerCamelCase : List[Any] = use_cache
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Dict = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
lowerCamelCase : List[str] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Any = """blip_vision_model"""
def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=3072 , UpperCamelCase__=512 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=384 , UpperCamelCase__=16 , UpperCamelCase__="gelu" , UpperCamelCase__=1e-5 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , **UpperCamelCase__ , ) -> Any:
super().__init__(**UpperCamelCase__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : List[Any] = projection_dim
lowerCamelCase : int = num_hidden_layers
lowerCamelCase : List[Any] = num_attention_heads
lowerCamelCase : int = patch_size
lowerCamelCase : Optional[Any] = image_size
lowerCamelCase : int = initializer_range
lowerCamelCase : Optional[int] = attention_dropout
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : Optional[Any] = hidden_act
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : int = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("model_type" ) == "blip":
lowerCamelCase : str = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : int = """blip"""
lowerCamelCase_ : List[Any] = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=512 , UpperCamelCase__=2.6592 , UpperCamelCase__=256 , **UpperCamelCase__ , ) -> Optional[int]:
super().__init__(**UpperCamelCase__ )
if text_config is None:
lowerCamelCase : Union[str, Any] = {}
logger.info("`text_config` is `None`. Initializing the `BlipTextConfig` with default values." )
if vision_config is None:
lowerCamelCase : int = {}
logger.info("`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values." )
lowerCamelCase : int = BlipTextConfig(**UpperCamelCase__ )
lowerCamelCase : Tuple = BlipVisionConfig(**UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.vision_config.hidden_size
lowerCamelCase : Tuple = projection_dim
lowerCamelCase : List[Any] = logit_scale_init_value
lowerCamelCase : str = 1.0
lowerCamelCase : Optional[int] = 0.02
lowerCamelCase : Dict = image_text_hidden_size
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Dict = copy.deepcopy(self.__dict__ )
lowerCamelCase : str = self.text_config.to_dict()
lowerCamelCase : str = self.vision_config.to_dict()
lowerCamelCase : Any = self.__class__.model_type
return output
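# A hedged usage sketch: assuming the three classes above correspond to
# transformers' BlipTextConfig, BlipVisionConfig and BlipConfig, a combined
# config can be assembled from the two sub-configs via the classmethod defined
# above (the concrete hyper-parameters here are illustrative).
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

_text_config = BlipTextConfig(hidden_size=768, num_hidden_layers=12)
_vision_config = BlipVisionConfig(hidden_size=768, image_size=384, patch_size=16)
_blip_config = BlipConfig.from_text_vision_configs(_text_config, _vision_config)

# The combined config keeps typed sub-configs around, mirroring the to_dict() method above.
assert _blip_config.text_config.hidden_size == 768
assert _blip_config.vision_config.image_size == 384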
| 311
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = """huggingface/label-files"""
lowerCAmelCase__ : Union[str, Any] = """imagenet-1k-id2label.json"""
lowerCAmelCase__ : List[str] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase__ : List[Any] = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : List[Any] = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowerCAmelCase__ : int = BitConfig(
conv_layer=UpperCamelCase , num_labels=1000 , idalabel=UpperCamelCase , labelaid=UpperCamelCase , )
return config
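# Hedged illustration of the comment above (not a code path exercised by this
# script): for ViT-hybrid checkpoints the config returned by the function above
# would additionally be adjusted with the attribute values named in that comment.
def _adjust_config_for_vit_hybrid(config):
    config.layer_type = "bottleneck"
    config.stem_type = "same"
    config.conv_layer = "std_conv_same"
    return config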
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if "stem.conv" in name:
lowerCAmelCase__ : Tuple = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
lowerCAmelCase__ : List[str] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
lowerCAmelCase__ : str = """bit.""" + name
if "bit" not in name and "classifier" not in name:
lowerCAmelCase__ : str = """bit.encoder.""" + name
return name
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase__ : Dict = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=False ):
"""simple docstring"""
lowerCAmelCase__ : str = get_config(UpperCamelCase )
# load original model from timm
lowerCAmelCase__ : Any = create_model(UpperCamelCase , pretrained=UpperCamelCase )
timm_model.eval()
# load state_dict of original model
lowerCAmelCase__ : Tuple = timm_model.state_dict()
for key in state_dict.copy().keys():
lowerCAmelCase__ : str = state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : Tuple = val.squeeze() if """head""" in key else val
# load HuggingFace model
lowerCAmelCase__ : str = BitForImageClassification(UpperCamelCase )
model.eval()
model.load_state_dict(UpperCamelCase )
# create image processor
lowerCAmelCase__ : str = create_transform(**resolve_data_config({} , model=UpperCamelCase ) )
lowerCAmelCase__ : int = transform.transforms
lowerCAmelCase__ : List[str] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
lowerCAmelCase__ : int = BitImageProcessor(
do_resize=UpperCamelCase , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=UpperCamelCase , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=UpperCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCAmelCase__ : List[Any] = prepare_img()
lowerCAmelCase__ : Tuple = transform(UpperCamelCase ).unsqueeze(0 )
lowerCAmelCase__ : Union[str, Any] = processor(UpperCamelCase , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(UpperCamelCase , UpperCamelCase )
# verify logits
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
lowerCAmelCase__ : Tuple = timm_model(UpperCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCamelCase , outputs.logits , atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase )
processor.save_pretrained(UpperCamelCase )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
_lowerCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 720
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowerCAmelCase = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase=8 ):
"""simple docstring"""
lowerCAmelCase__ : Any = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
lowerCAmelCase__ : List[str] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
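# Readable re-statement of the helper above (a sketch with descriptive names,
# since the obfuscated parameter names shadow each other): each dimension is
# divided by scale_factor**2 with rounding up, then multiplied back by scale_factor.
def _get_new_height_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# e.g. _get_new_height_width(768, 768, 8) == (96, 96) and a 770-pixel side rounds up to 104.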
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,) -> int:
super().__init__()
self.register_modules(
text_encoder=__UpperCAmelCase ,tokenizer=__UpperCAmelCase ,unet=__UpperCAmelCase ,scheduler=__UpperCAmelCase ,movq=__UpperCAmelCase ,)
lowerCAmelCase__ : int = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
if latents is None:
lowerCAmelCase__ : Optional[int] = randn_tensor(__UpperCAmelCase ,generator=__UpperCAmelCase ,device=__UpperCAmelCase ,dtype=__UpperCAmelCase )
else:
if latents.shape != shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowerCAmelCase__ : int = latents.to(__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,) -> Any:
lowerCAmelCase__ : List[str] = len(__UpperCAmelCase ) if isinstance(__UpperCAmelCase ,__UpperCAmelCase ) else 1
# get prompt text embeddings
lowerCAmelCase__ : Any = self.tokenizer(
__UpperCAmelCase ,padding="""max_length""" ,truncation=__UpperCAmelCase ,max_length=77 ,return_attention_mask=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Tuple = text_inputs.input_ids
lowerCAmelCase__ : Optional[int] = self.tokenizer(__UpperCAmelCase ,padding="""longest""" ,return_tensors="""pt""" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Optional[int] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
lowerCAmelCase__ : List[Any] = text_input_ids.to(__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = text_inputs.attention_mask.to(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.text_encoder(
input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = prompt_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Optional[int] = text_encoder_hidden_states.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Union[str, Any] = text_mask.repeat_interleave(__UpperCAmelCase ,dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase__ : List[str]
if negative_prompt is None:
lowerCAmelCase__ : Tuple = [""""""] * batch_size
elif type(__UpperCAmelCase ) is not type(__UpperCAmelCase ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase )} !="""
F""" {type(__UpperCAmelCase )}.""" )
elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = [negative_prompt]
elif batch_size != len(__UpperCAmelCase ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
lowerCAmelCase__ : int = negative_prompt
lowerCAmelCase__ : List[str] = self.tokenizer(
__UpperCAmelCase ,padding="""max_length""" ,max_length=77 ,truncation=__UpperCAmelCase ,return_attention_mask=__UpperCAmelCase ,add_special_tokens=__UpperCAmelCase ,return_tensors="""pt""" ,)
lowerCAmelCase__ : Dict = uncond_input.input_ids.to(__UpperCAmelCase )
lowerCAmelCase__ : Union[str, Any] = uncond_input.attention_mask.to(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ : int = self.text_encoder(
input_ids=__UpperCAmelCase ,attention_mask=__UpperCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCAmelCase__ : Optional[int] = negative_prompt_embeds.shape[1]
lowerCAmelCase__ : Optional[Any] = negative_prompt_embeds.repeat(1 ,__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt ,__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = uncond_text_encoder_hidden_states.shape[1]
lowerCAmelCase__ : Tuple = uncond_text_encoder_hidden_states.repeat(1 ,__UpperCAmelCase ,1 )
lowerCAmelCase__ : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt ,__UpperCAmelCase ,-1 )
lowerCAmelCase__ : List[str] = uncond_text_mask.repeat_interleave(__UpperCAmelCase ,dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase__ : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowerCAmelCase__ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowerCAmelCase__ : Tuple = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def UpperCAmelCase_ ( self ,__UpperCAmelCase=0 ) -> Optional[Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCAmelCase__ : Any = torch.device(F"""cuda:{gpu_id}""" )
lowerCAmelCase__ : Union[str, Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCAmelCase ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase=0 ) -> Optional[int]:
if is_accelerate_available() and is_accelerate_version(""">=""" ,"""0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCAmelCase__ : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("""cpu""" ,silence_dtype_warnings=__UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCAmelCase__ : Optional[int] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = cpu_offload_with_hook(__UpperCAmelCase ,__UpperCAmelCase ,prev_module_hook=__UpperCAmelCase )
if self.safety_checker is not None:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = cpu_offload_with_hook(self.safety_checker ,__UpperCAmelCase ,prev_module_hook=__UpperCAmelCase )
# We'll offload the last model manually.
lowerCAmelCase__ : Union[str, Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase_ ( self ) -> Optional[int]:
if not hasattr(self.unet ,"""_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCAmelCase ,"""_hf_hook""" )
and hasattr(module._hf_hook ,"""execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCAmelCase )
def __call__( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase = None ,__UpperCAmelCase = 512 ,__UpperCAmelCase = 512 ,__UpperCAmelCase = 100 ,__UpperCAmelCase = 4.0 ,__UpperCAmelCase = 1 ,__UpperCAmelCase = None ,__UpperCAmelCase = None ,__UpperCAmelCase = "pil" ,__UpperCAmelCase = True ,) -> Tuple:
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Dict = 1
elif isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}""" )
lowerCAmelCase__ : str = self._execution_device
lowerCAmelCase__ : List[Any] = batch_size * num_images_per_prompt
lowerCAmelCase__ : int = guidance_scale > 1.0
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = self._encode_prompt(
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = torch.cat(__UpperCAmelCase ,dim=0 )
if isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = torch.cat(__UpperCAmelCase ,dim=0 )
if do_classifier_free_guidance:
lowerCAmelCase__ : Union[str, Any] = image_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Optional[Any] = negative_image_embeds.repeat_interleave(__UpperCAmelCase ,dim=0 )
lowerCAmelCase__ : Any = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(
dtype=prompt_embeds.dtype ,device=__UpperCAmelCase )
self.scheduler.set_timesteps(__UpperCAmelCase ,device=__UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = self.scheduler.timesteps
lowerCAmelCase__ : Dict = self.unet.config.in_channels
lowerCAmelCase__ , lowerCAmelCase__ : Any = get_new_h_w(__UpperCAmelCase ,__UpperCAmelCase ,self.movq_scale_factor )
# create initial latent
lowerCAmelCase__ : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width) ,text_encoder_hidden_states.dtype ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,self.scheduler ,)
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase__ : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase__ : Optional[Any] = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
lowerCAmelCase__ : Any = self.unet(
sample=__UpperCAmelCase ,timestep=__UpperCAmelCase ,encoder_hidden_states=__UpperCAmelCase ,added_cond_kwargs=__UpperCAmelCase ,return_dict=__UpperCAmelCase ,)[0]
if do_classifier_free_guidance:
lowerCAmelCase__ , lowerCAmelCase__ : Any = noise_pred.split(latents.shape[1] ,dim=1 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = noise_pred.chunk(2 )
lowerCAmelCase__ , lowerCAmelCase__ : int = variance_pred.chunk(2 )
lowerCAmelCase__ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCAmelCase__ : str = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"""variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCAmelCase__ , lowerCAmelCase__ : Any = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ : Optional[int] = self.scheduler.step(
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,generator=__UpperCAmelCase ,).prev_sample
# post-processing
lowerCAmelCase__ : int = self.movq.decode(__UpperCAmelCase ,force_not_quantize=__UpperCAmelCase )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowerCAmelCase__ : Tuple = image * 0.5 + 0.5
lowerCAmelCase__ : str = image.clamp(0 ,1 )
lowerCAmelCase__ : List[Any] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
lowerCAmelCase__ : int = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 160
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : List[Any] = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
_a : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
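# Toy illustration of the lazy-import pattern above (a sketch, not transformers'
# actual _LazyModule): attribute lookups are deferred, so the module that defines
# a symbol is only imported when that symbol is first requested.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # flat map from public symbol to the module that actually defines it
        self._symbol_to_module = {
            symbol: module_name
            for module_name, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        module_name = self._symbol_to_module.get(symbol)
        if module_name is None:
            raise AttributeError(symbol)
        return getattr(importlib.import_module(module_name), symbol)


# e.g. exposing math.sqrt lazily: the math module is only imported on first attribute access.
_toy = _ToyLazyModule("toy_lazy", {"math": ["sqrt"]})
assert _toy.sqrt(9) == 3.0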
| 447
|
'''simple docstring'''
import qiskit
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
UpperCAmelCase = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
UpperCAmelCase = qiskit.execute(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 447
| 1
|
"""simple docstring"""
from pathlib import Path
import fire
def A_ ( _lowerCAmelCase : str, _lowerCAmelCase : str, _lowerCAmelCase : int ):
"""simple docstring"""
_a = Path(_lowerCAmelCase )
_a = Path(_lowerCAmelCase )
dest_dir.mkdir(exist_ok=_lowerCAmelCase )
for path in src_dir.iterdir():
_a = [x.rstrip() for x in list(path.open().readlines() )][:n]
_a = dest_dir.joinpath(path.name )
print(_lowerCAmelCase )
dest_path.open('''w''' ).write('''\n'''.join(_lowerCAmelCase ) )
if __name__ == "__main__":
fire.Fire(minify)
| 285
|
"""simple docstring"""
def A_ ( _lowerCAmelCase : Dict=2_81_23 ):
"""simple docstring"""
_a = [1] * (limit + 1)
for i in range(2, int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1, limit // i + 1 ):
sum_divs[k * i] += k + i
_a = set()
_a = 0
for n in range(1, limit + 1 ):
if sum_divs[n] > n:
abundants.add(_lowerCAmelCase )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
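# Small sanity sketch for the sieve above (descriptive names, illustrative
# values): sum_divs[n] accumulates the proper-divisor sum of n, a number is
# abundant when that sum exceeds n, and 24 = 12 + 12 is the smallest integer
# expressible as a sum of two abundant numbers.
def _proper_divisor_sum(n):
    return sum(d for d in range(1, n) if n % d == 0)


assert _proper_divisor_sum(12) == 16  # 1 + 2 + 3 + 4 + 6 > 12, so 12 is abundant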
if __name__ == "__main__":
print(solution())
| 285
| 1
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
UpperCAmelCase_ = 25_00_04
UpperCAmelCase_ = 25_00_20
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Union[str, Any] = MBartaaTokenizer
__A : Any = MBartaaTokenizerFast
__A : Dict = True
__A : Any = True
def UpperCamelCase( self ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCAmelCase = MBartaaTokenizer(_UpperCamelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase( self ):
_UpperCAmelCase = '''<s>'''
_UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_UpperCamelCase ) , 1054 )
def UpperCamelCase( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def UpperCamelCase( self ):
_UpperCAmelCase = MBartaaTokenizer(_UpperCamelCase , src_lang='''en_XX''' , tgt_lang='''ro_RO''' , keep_accents=_UpperCamelCase )
_UpperCAmelCase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_UpperCamelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_UpperCAmelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_UpperCamelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , )
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(_UpperCamelCase )
self.assertListEqual(
_UpperCamelCase , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , )
@slow
def UpperCamelCase( self ):
# fmt: off
_UpperCAmelCase = {'''input_ids''': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCamelCase , model_name='''facebook/mbart-large-50''' , revision='''d3913889c59cd5c9e456b269c376325eabad57e2''' , )
def UpperCamelCase( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
_UpperCAmelCase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart50''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = tokenizer_r.save_pretrained(_UpperCamelCase )
_UpperCAmelCase = tokenizer_p.save_pretrained(_UpperCamelCase )
                # Check that it saves the same files, plus the tokenizer.json file for the fast tokenizer
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
_UpperCAmelCase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase )
# Checks everything loads correctly in the same way
_UpperCAmelCase = tokenizer_r.from_pretrained(_UpperCamelCase )
_UpperCAmelCase = tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_UpperCamelCase )
# Save tokenizer rust, legacy_format=True
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase )
_UpperCAmelCase = tokenizer_p.save_pretrained(_UpperCamelCase )
                # Check that it saves the same files
self.assertSequenceEqual(_UpperCamelCase , _UpperCamelCase )
# Checks everything loads correctly in the same way
_UpperCAmelCase = tokenizer_r.from_pretrained(_UpperCamelCase )
_UpperCAmelCase = tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) )
shutil.rmtree(_UpperCamelCase )
# Save tokenizer rust, legacy_format=False
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = tokenizer_r.save_pretrained(_UpperCamelCase , legacy_format=_UpperCamelCase )
_UpperCAmelCase = tokenizer_p.save_pretrained(_UpperCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
_UpperCAmelCase = tokenizer_r.from_pretrained(_UpperCamelCase )
_UpperCAmelCase = tokenizer_p.from_pretrained(_UpperCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCamelCase , _UpperCamelCase ) )
shutil.rmtree(_UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class __UpperCamelCase ( unittest.TestCase ):
__A : Dict = """facebook/mbart-large-50-one-to-many-mmt"""
__A : Optional[int] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
__A : Optional[int] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
__A : Any = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCamelCase( cls ):
_UpperCAmelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
_UpperCAmelCase = 1
return cls
def UpperCamelCase( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''mr_IN'''] , 250038 )
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCamelCase )
def UpperCamelCase( self ):
self.assertIn(_UpperCamelCase , self.tokenizer.all_special_ids )
_UpperCAmelCase = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
_UpperCAmelCase = self.tokenizer.decode(_UpperCamelCase , skip_special_tokens=_UpperCamelCase )
_UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , _UpperCamelCase )
_UpperCAmelCase = 10
_UpperCAmelCase = self.tokenizer(_UpperCamelCase , max_length=_UpperCamelCase , truncation=_UpperCamelCase ).input_ids[0]
self.assertEqual(ids[0] , _UpperCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(_UpperCamelCase ) , _UpperCamelCase )
def UpperCamelCase( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [250053, 250001] )
def UpperCamelCase( self ):
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCamelCase )
_UpperCAmelCase = MBartaaTokenizer.from_pretrained(_UpperCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCamelCase )
@require_torch
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_UpperCamelCase , return_tensors='''pt''' )
_UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
_UpperCAmelCase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_UpperCamelCase , _UpperCamelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer(self.src_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=3 , return_tensors='''pt''' )
_UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCamelCase , truncation=_UpperCamelCase , max_length=10 , return_tensors='''pt''' )
_UpperCAmelCase = targets['''input_ids''']
_UpperCAmelCase = shift_tokens_right(_UpperCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase( self ):
_UpperCAmelCase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(_UpperCamelCase ) , {
# en_XX, A, test, EOS
'''input_ids''': [[250004, 62, 3034, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 250001,
} , )
| 32
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __UpperCamelCase ( A__ ):
__A : Any = """biogpt"""
def __init__( self , _UpperCamelCase=42384 , _UpperCamelCase=1024 , _UpperCamelCase=24 , _UpperCamelCase=16 , _UpperCamelCase=4096 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=1024 , _UpperCamelCase=0.02 , _UpperCamelCase=1e-12 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , **_UpperCamelCase , ):
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = scale_embedding
_UpperCAmelCase = use_cache
_UpperCAmelCase = layerdrop
_UpperCAmelCase = activation_dropout
super().__init__(pad_token_id=_UpperCamelCase , bos_token_id=_UpperCamelCase , eos_token_id=_UpperCamelCase , **_UpperCamelCase )
| 32
| 1
|
from collections.abc import Iterable
from typing import Generic, TypeVar
__snake_case = TypeVar("_T")
class UpperCAmelCase ( Generic[_T] ):
def __init__( self : Optional[Any] , __magic_name__ : Iterable[_T] | None = None ):
"""simple docstring"""
UpperCamelCase = list(iterable or [] )
UpperCamelCase = []
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self._stacka ) + len(self._stacka )
def __repr__( self : List[str] ):
"""simple docstring"""
return F'Queue({tuple(self._stacka[::-1] + self._stacka )})'
def lowerCamelCase_ ( self : Tuple , __magic_name__ : _T ):
"""simple docstring"""
self._stacka.append(__magic_name__ )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self._stacka.pop
UpperCamelCase = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
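# Readable re-statement of the two-stack queue above (a sketch with explicit
# names, since the obfuscation collapses both stacks into one identifier):
# enqueue pushes onto an inbox stack, dequeue refills an outbox stack from the
# inbox only when the outbox is empty, giving amortised O(1) operations.
class _TwoStackQueue:
    def __init__(self, iterable=None):
        self._inbox = list(iterable or [])
        self._outbox = []

    def put(self, item):
        self._inbox.append(item)

    def get(self):
        if not self._outbox:
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()


# _q = _TwoStackQueue([1, 2]); _q.put(3); successive _q.get() calls yield 1, 2, 3 in FIFO order.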
if __name__ == "__main__":
from doctest import testmod
testmod()
| 711
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
# fmt: off
UpperCamelCase = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
UpperCamelCase = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
UpperCamelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
UpperCamelCase = {"""unk_token""": """<unk>"""}
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
UpperCamelCase = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
UpperCamelCase = os.path.join(self.tmpdirname , __magic_name__ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : str , **__magic_name__ : Tuple ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **__magic_name__ )
def lowerCamelCase_ ( self : Optional[int] , **__magic_name__ : str ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **__magic_name__ )
def lowerCamelCase_ ( self : Optional[int] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCamelCase = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = self.get_rust_tokenizer()
UpperCamelCase = self.get_image_processor()
UpperCamelCase = OwlViTProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ )
UpperCamelCase = OwlViTProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __magic_name__ )
self.assertIsInstance(processor_fast.tokenizer , __magic_name__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __magic_name__ )
self.assertIsInstance(processor_fast.image_processor , __magic_name__ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCamelCase = self.get_image_processor(do_normalize=__magic_name__ )
UpperCamelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__magic_name__ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(__magic_name__ , return_tensors="""np""" )
UpperCamelCase = processor(images=__magic_name__ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
UpperCamelCase = """lower newer"""
UpperCamelCase = processor(text=__magic_name__ , return_tensors="""np""" )
UpperCamelCase = tokenizer(__magic_name__ , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
UpperCamelCase = """lower newer"""
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = """google/owlvit-base-patch32"""
UpperCamelCase = OwlViTProcessor.from_pretrained(__magic_name__ )
UpperCamelCase = ["""cat""", """nasa badge"""]
UpperCamelCase = processor(text=__magic_name__ )
UpperCamelCase = 1_6
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = """google/owlvit-base-patch32"""
UpperCamelCase = OwlViTProcessor.from_pretrained(__magic_name__ )
UpperCamelCase = [["""cat""", """nasa badge"""], ["""person"""]]
UpperCamelCase = processor(text=__magic_name__ )
UpperCamelCase = 1_6
UpperCamelCase = len(__magic_name__ )
UpperCamelCase = max([len(__magic_name__ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = """google/owlvit-base-patch32"""
UpperCamelCase = OwlViTProcessor.from_pretrained(__magic_name__ )
UpperCamelCase = ["""cat""", """nasa badge"""]
UpperCamelCase = processor(text=__magic_name__ )
UpperCamelCase = 1_6
UpperCamelCase = inputs["""input_ids"""]
UpperCamelCase = [
[4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(images=__magic_name__ , query_images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = OwlViTProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(__magic_name__ )
UpperCamelCase = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
| 181
| 0
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 475
|
def perfect(number: int) -> bool:
    """Check whether `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 475
| 1
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
__UpperCAmelCase = 2_5_6
class A__ ( __lowerCamelCase ):
"""simple docstring"""
_lowercase : List[Any] = ['''melgan''']
def __init__( self : int , A_ : SpectrogramNotesEncoder , A_ : SpectrogramContEncoder , A_ : TaFilmDecoder , A_ : DDPMScheduler , A_ : OnnxRuntimeModel if is_onnx_available() else Any , ):
'''simple docstring'''
super().__init__()
# From MELGAN
_lowerCAmelCase : Optional[int] = math.log(1E-5 ) # Matches MelGAN training.
_lowerCAmelCase : Tuple = 4.0 # Largest value for most examples
_lowerCAmelCase : str = 1_2_8
self.register_modules(
notes_encoder=UpperCAmelCase_ , continuous_encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , melgan=UpperCAmelCase_ , )
def __magic_name__ ( self : Dict , A_ : Any , A_ : str=(-1.0, 1.0) , A_ : Union[str, Any]=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = output_range
if clip:
_lowerCAmelCase : Any = torch.clip(UpperCAmelCase_ , self.min_value , self.max_value )
# Scale to [0, 1].
_lowerCAmelCase : Dict = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def __magic_name__ ( self : Union[str, Any] , A_ : Dict , A_ : List[Any]=(-1.0, 1.0) , A_ : Dict=False ):
'''simple docstring'''
_lowerCAmelCase : Any = input_range
_lowerCAmelCase : Optional[Any] = torch.clip(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if clip else outputs
# Scale to [0, 1].
_lowerCAmelCase : Any = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def __magic_name__ ( self : Optional[int] , A_ : List[str] , A_ : str , A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input_tokens > 0
_lowerCAmelCase : Dict = self.notes_encoder(
encoder_input_tokens=UpperCAmelCase_ , encoder_inputs_mask=UpperCAmelCase_ )
_lowerCAmelCase : Optional[Any] = self.continuous_encoder(
encoder_inputs=UpperCAmelCase_ , encoder_inputs_mask=UpperCAmelCase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def __magic_name__ ( self : List[str] , A_ : Tuple , A_ : int , A_ : int ):
'''simple docstring'''
_lowerCAmelCase : List[str] = noise_time
if not torch.is_tensor(UpperCAmelCase_ ):
_lowerCAmelCase : str = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(UpperCAmelCase_ ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Dict = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCAmelCase : Optional[Any] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_lowerCAmelCase : Any = self.decoder(
encodings_and_masks=UpperCAmelCase_ , decoder_input_tokens=UpperCAmelCase_ , decoder_noise_time=UpperCAmelCase_ )
return logits
@torch.no_grad()
def __call__( self : List[Any] , A_ : List[List[int]] , A_ : Optional[torch.Generator] = None , A_ : int = 1_0_0 , A_ : bool = True , A_ : str = "numpy" , A_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A_ : int = 1 , ):
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(UpperCAmelCase_ )}.''' )
_lowerCAmelCase : Dict = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_lowerCAmelCase : List[str] = np.zeros([1, 0, self.n_dims] , np.floataa )
_lowerCAmelCase : List[str] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase_ , device=self.device )
for i, encoder_input_tokens in enumerate(UpperCAmelCase_ ):
if i == 0:
_lowerCAmelCase : List[str] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_lowerCAmelCase : Any = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCAmelCase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_lowerCAmelCase : Union[str, Any] = ones
_lowerCAmelCase : Tuple = self.scale_features(
UpperCAmelCase_ , output_range=[-1.0, 1.0] , clip=UpperCAmelCase_ )
_lowerCAmelCase : Tuple = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCAmelCase_ , continuous_mask=UpperCAmelCase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_lowerCAmelCase : List[Any] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCAmelCase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_lowerCAmelCase : Any = self.decode(
encodings_and_masks=UpperCAmelCase_ , input_tokens=UpperCAmelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_lowerCAmelCase : Optional[int] = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ ).prev_sample
_lowerCAmelCase : List[str] = self.scale_to_features(UpperCAmelCase_ , input_range=[-1.0, 1.0] )
_lowerCAmelCase : List[str] = mel[:1]
_lowerCAmelCase : int = mel.cpu().float().numpy()
_lowerCAmelCase : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase_ , UpperCAmelCase_ )
logger.info("Generated segment" , UpperCAmelCase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'." )
if output_type == "numpy":
_lowerCAmelCase : Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_lowerCAmelCase : Optional[int] = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCAmelCase_ )
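# Usage sketch (assumptions: the public `diffusers` SpectrogramDiffusionPipeline
# API and the "google/music-spectrogram-diffusion" checkpoint; `input_tokens`
# would come from a MIDI-to-token feature converter and is only a placeholder):
#
#     from diffusers import SpectrogramDiffusionPipeline
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     output = pipe(input_tokens, num_inference_steps=100, output_type="numpy")
#     audio = output.audios[0]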
| 706
|
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
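# Minimal usage sketch (not part of the original module): any object exposing a
# `process(sample) -> float` method satisfies FilterType, so a pass-through
# "filter" is enough to exercise the plotting helpers. The 48000 Hz samplerate
# is an arbitrary choice for this example.
if __name__ == "__main__":

    class PassThroughFilter:
        def process(self, sample: float) -> float:
            return sample  # identity: flat 0 dB gain, zero phase shift

    show_frequency_response(PassThroughFilter(), 48000)
    show_phase_response(PassThroughFilter(), 48000)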
| 503
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}


class OpenAIGPTConfig(PretrainedConfig):
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
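# Illustrative usage (a sketch, not part of the upstream file): attribute_map
# lets the generic config attribute names resolve to the GPT-specific ones.
#
#     >>> config = OpenAIGPTConfig(n_embd=256, n_layer=4)
#     >>> config.hidden_size
#     256
#     >>> config.num_hidden_layers
#     4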
| 666
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class snake_case__ ( _UpperCamelCase ):
_SCREAMING_SNAKE_CASE : Dict = ["pixel_values"]
def __init__( self : Union[str, Any] , A__ : bool = True , A__ : Dict[str, int] = None , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : bool = True , A__ : Dict[str, int] = None , A__ : bool = True , A__ : Union[int, float] = 1 / 2_55 , A__ : bool = True , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = True , **A__ : Optional[int] , ) -> None:
'''simple docstring'''
super().__init__(**A__ )
snake_case_ : str = size if size is not None else {"shortest_edge": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(A__ , default_to_square=A__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
snake_case_ : Dict = get_size_dict(A__ , default_to_square=A__ , param_name="crop_size" )
snake_case_ : str = do_resize
snake_case_ : str = size
snake_case_ : Optional[Any] = resample
snake_case_ : Any = do_center_crop
snake_case_ : Any = crop_size
snake_case_ : str = do_rescale
snake_case_ : Optional[Any] = rescale_factor
snake_case_ : int = do_normalize
snake_case_ : Optional[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
snake_case_ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
snake_case_ : int = do_convert_rgb
def UpperCAmelCase__ ( self : Optional[int] , A__ : np.ndarray , A__ : Dict[str, int] , A__ : PILImageResampling = PILImageResampling.BICUBIC , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : str = get_size_dict(A__ , default_to_square=A__ )
if "shortest_edge" not in size:
raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
snake_case_ : str = get_resize_output_image_size(A__ , size=size["shortest_edge"] , default_to_square=A__ )
return resize(A__ , size=A__ , resample=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Tuple , A__ : np.ndarray , A__ : Dict[str, int] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[Any] , ) -> np.ndarray:
'''simple docstring'''
snake_case_ : Optional[int] = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(A__ , size=(size["height"], size["width"]) , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Optional[Any] , A__ : np.ndarray , A__ : Union[int, float] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : List[str] , ) -> str:
'''simple docstring'''
return rescale(A__ , scale=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : Any , A__ : np.ndarray , A__ : Union[float, List[float]] , A__ : Union[float, List[float]] , A__ : Optional[Union[str, ChannelDimension]] = None , **A__ : Any , ) -> np.ndarray:
'''simple docstring'''
return normalize(A__ , mean=A__ , std=A__ , data_format=A__ , **A__ )
def UpperCAmelCase__ ( self : List[Any] , A__ : ImageInput , A__ : bool = None , A__ : Dict[str, int] = None , A__ : PILImageResampling = None , A__ : bool = None , A__ : int = None , A__ : bool = None , A__ : float = None , A__ : bool = None , A__ : Optional[Union[float, List[float]]] = None , A__ : Optional[Union[float, List[float]]] = None , A__ : bool = None , A__ : Optional[Union[str, TensorType]] = None , A__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **A__ : Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
snake_case_ : List[Any] = do_resize if do_resize is not None else self.do_resize
snake_case_ : Union[str, Any] = size if size is not None else self.size
snake_case_ : Any = get_size_dict(A__ , param_name="size" , default_to_square=A__ )
snake_case_ : Optional[int] = resample if resample is not None else self.resample
snake_case_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : List[str] = crop_size if crop_size is not None else self.crop_size
snake_case_ : Tuple = get_size_dict(A__ , param_name="crop_size" , default_to_square=A__ )
snake_case_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : Any = image_mean if image_mean is not None else self.image_mean
snake_case_ : List[str] = image_std if image_std is not None else self.image_std
snake_case_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
snake_case_ : List[Any] = make_list_of_images(A__ )
if not valid_images(A__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
snake_case_ : Dict = [convert_to_rgb(A__ ) for image in images]
# All transformations expect numpy arrays.
snake_case_ : Dict = [to_numpy_array(A__ ) for image in images]
if do_resize:
snake_case_ : Dict = [self.resize(image=A__ , size=A__ , resample=A__ ) for image in images]
if do_center_crop:
snake_case_ : Tuple = [self.center_crop(image=A__ , size=A__ ) for image in images]
if do_rescale:
snake_case_ : str = [self.rescale(image=A__ , scale=A__ ) for image in images]
if do_normalize:
snake_case_ : int = [self.normalize(image=A__ , mean=A__ , std=A__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(A__ , A__ ) for image in images]
snake_case_ : Tuple = {"pixel_values": images}
return BatchFeature(data=A__ , tensor_type=A__ )
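# Numpy-only sketch of the rescale + normalize steps performed above (the
# mean/std numbers are the standard OPENAI_CLIP constants; treat the exact
# values as an assumption rather than part of this file):
if __name__ == "__main__":
    import numpy as np

    clip_mean = np.array([0.48145466, 0.4578275, 0.40821073]).reshape(3, 1, 1)
    clip_std = np.array([0.26862954, 0.26130258, 0.27577711]).reshape(3, 1, 1)

    pixels = np.random.randint(0, 256, size=(3, 224, 224)).astype(np.float32)
    pixels = pixels * (1 / 255)  # rescale to [0, 1]
    pixels = (pixels - clip_mean) / clip_std  # per-channel normalization
    print(pixels.shape)  # (3, 224, 224)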
| 666
| 1
|
'''simple docstring'''
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
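# Example values (illustrative): lru_cache memoises earlier results, so a later
# call only multiplies the remaining terms.
#
#     >>> factorial(5)
#     120
#     >>> factorial(10)
#     3628800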
| 700
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : PreTrainedTokenizer , __lowerCamelCase : int , __lowerCamelCase : Optional[int] = None , ) -> Tuple:
'''simple docstring'''
_a = {}
if train_file is not None:
_a = [train_file]
if eval_file is not None:
_a = [eval_file]
if test_file is not None:
_a = [test_file]
_a = datasets.load_dataset("csv" , data_files=__lowerCamelCase )
_a = list(ds[list(files.keys() )[0]].features.keys() )
_a = features_name.pop(__lowerCamelCase )
_a = list(set(ds[list(files.keys() )[0]][label_name] ) )
_a = {label: i for i, label in enumerate(__lowerCamelCase )}
_a = tokenizer.model_input_names
_a = {}
if len(__lowerCamelCase ) == 1:
for k in files.keys():
_a = ds[k].map(
lambda __lowerCamelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" ) , batched=__lowerCamelCase , )
elif len(__lowerCamelCase ) == 2:
for k in files.keys():
_a = ds[k].map(
lambda __lowerCamelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding="max_length" , ) , batched=__lowerCamelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
_a = (
tf.data.Dataset.from_generator(
__lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_a = (
tf.data.Dataset.from_generator(
__lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_a = (
tf.data.Dataset.from_generator(
__lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowercase__ = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
UpperCAmelCase = field(metadata={'''help''': '''Which column contains the label'''} )
UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''The path of the training file'''} )
UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''The path of the development file'''} )
UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''The path of the test file'''} )
UpperCAmelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class __SCREAMING_SNAKE_CASE :
UpperCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
UpperCAmelCase = field(
default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_a , _a , _a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, "
F"16-bits training: {training_args.fpaa}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_a , _a , _a , _a = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__lowerCamelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__lowerCamelCase ) , labelaid=__lowerCamelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_a = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(__lowerCamelCase : EvalPrediction ) -> Dict:
_a = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_a = TFTrainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=__lowerCamelCase , eval_dataset=__lowerCamelCase , compute_metrics=__lowerCamelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_a = trainer.evaluate()
_a = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(__lowerCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
results.update(__lowerCamelCase )
return results
if __name__ == "__main__":
main()
| 276
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = "▁"
UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ : Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase__ : Optional[Any] = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = VOCAB_FILES_NAMES
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Tuple = ['input_ids', 'attention_mask']
snake_case__ :List[int] = []
snake_case__ :List[int] = []
def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
}
lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
lowerCAmelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : List[Any] , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = ""
lowerCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__magic_name__ ) + token
lowerCAmelCase__ = True
lowerCAmelCase__ = []
else:
current_sub_tokens.append(__magic_name__ )
lowerCAmelCase__ = False
out_string += self.sp_model.decode(__magic_name__ )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
lowerCAmelCase__ = [1] * len(self.prefix_tokens )
lowerCAmelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
lowerCAmelCase__ = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[src_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
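# Usage sketch (assuming this class mirrors the upstream `MBart50Tokenizer` in
# transformers and that the checkpoint listed in PRETRAINED_VOCAB_FILES_MAP
# above is reachable):
#
#     from transformers import MBart50Tokenizer
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")
#     # build_inputs_with_special_tokens prepends the en_XX language code and
#     # appends </s>, so input_ids start with the en_XX id and end with eos.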
| 48
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module: torch.nn.Module) -> None:
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
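# Illustrative usage of the helpers above (a sketch, not from the original file):
if __name__ == "__main__":
    layer = torch.nn.Linear(4, 4)
    freeze_params(layer)
    print(all(not p.requires_grad for p in layer.parameters()))  # True
    print(get_device(), get_timestamp())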
| 48
| 1
|
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
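# Illustrative helper (not part of the original file): the table above maps a
# package name to its pinned requirement string, so a requirements.txt-style
# listing is a one-liner.
if __name__ == "__main__":
    for requirement in sorted(deps.values()):
        print(requirement)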
| 708
|
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
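# Example invocation (the script name and all paths below are placeholders):
#
#   python convert_t5_checkpoint.py \
#       --tf_checkpoint_path /tmp/t5/model.ckpt \
#       --config_file /tmp/t5/config.json \
#       --pytorch_dump_path /tmp/t5/pytorch_model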
| 527
| 0
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
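# Sanity check (illustrative): equation(x) = 10 - x**2 has its positive root at
# sqrt(10), roughly 3.162, and both calls above bracket that root, so each
# printed value lands within the 0.01 tolerance of it.
#
#     >>> import math
#     >>> abs(bisection(-2, 5) - math.sqrt(10)) < 0.01
#     True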
| 417
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 417
| 1
|
from manim import *
class UpperCamelCase (UpperCAmelCase__ ):
def __snake_case ( self :int ) ->List[Any]:
lowercase : Tuple = Rectangle(height=0.5 , width=0.5 )
lowercase : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowercase : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
lowercase : Union[str, Any] = [mem.copy() for i in range(6 )]
lowercase : str = [mem.copy() for i in range(6 )]
lowercase : List[str] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase : Any = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase : Tuple = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase : Union[str, Any] = Text("""CPU""" , font_size=24 )
lowercase : str = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__lowerCAmelCase )
lowercase : Any = [mem.copy() for i in range(4 )]
lowercase : Optional[Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase : Dict = Text("""GPU""" , font_size=24 )
lowercase : Tuple = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(__lowerCAmelCase )
lowercase : Optional[Any] = [mem.copy() for i in range(6 )]
lowercase : List[str] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase : Optional[int] = Text("""Model""" , font_size=24 )
lowercase : Optional[int] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(__lowerCAmelCase )
lowercase : Dict = []
lowercase : Any = []
for i, rect in enumerate(__lowerCAmelCase ):
lowercase : int = fill.copy().set_fill(__lowerCAmelCase , opacity=0.8 )
target.move_to(__lowerCAmelCase )
model_arr.append(__lowerCAmelCase )
lowercase : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowerCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(__lowerCAmelCase )
self.add(*__lowerCAmelCase , *__lowerCAmelCase )
lowercase : Optional[Any] = [meta_mem.copy() for i in range(6 )]
lowercase : Any = [meta_mem.copy() for i in range(6 )]
lowercase : List[Any] = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase : int = VGroup(*__lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase : Optional[Any] = VGroup(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0 )
lowercase : Union[str, Any] = Text("""Disk""" , font_size=24 )
lowercase : Optional[int] = Group(__lowerCAmelCase , __lowerCAmelCase ).arrange(__lowerCAmelCase , buff=0.5 , aligned_edge=__lowerCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
lowercase : Union[str, Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowercase : Any = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__lowerCAmelCase , __lowerCAmelCase )
lowercase : Dict = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(__lowerCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__lowerCAmelCase )
lowercase : int = MarkupText(
f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase ) )
lowercase : Tuple = Square(0.3 )
input.set_fill(__lowerCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , __lowerCAmelCase , buff=0.5 )
self.play(Write(__lowerCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=__lowerCAmelCase , buff=0.02 )
self.play(MoveToTarget(__lowerCAmelCase ) )
self.play(FadeOut(__lowerCAmelCase ) )
lowercase : int = Arrow(start=__lowerCAmelCase , end=__lowerCAmelCase , color=__lowerCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , __lowerCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
lowercase : int = MarkupText(
f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) )
lowercase : List[str] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(__lowerCAmelCase ) , Circumscribe(model_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
lowercase : Optional[int] = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , __lowerCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
lowercase : Any = AnimationGroup(
FadeOut(__lowerCAmelCase , run_time=0.5 ) , MoveToTarget(__lowerCAmelCase , run_time=0.5 ) , FadeIn(__lowerCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(__lowerCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
lowercase : List[Any] = 0.7
self.play(
Circumscribe(model_arr[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(model_arr[i + 1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowerCAmelCase , **__lowerCAmelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCAmelCase , **__lowerCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
lowercase : Any = a_c
lowercase : List[str] = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(__lowerCAmelCase ) , FadeOut(__lowerCAmelCase , run_time=0.5 ) , )
lowercase : Tuple = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__lowerCAmelCase , run_time=3 ) , MoveToTarget(__lowerCAmelCase ) )
self.wait()
| 710
|
"""simple docstring"""
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCamelCase (__snake_case , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCamelCase (unittest.TestCase ):
@property
def __snake_case ( self :Union[str, Any] ) ->List[str]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __snake_case ( self :Union[str, Any] ) ->Optional[Any]:
lowercase : Optional[Any] = ort.SessionOptions()
lowercase : List[Any] = False
return options
def __snake_case ( self :int ) ->Union[str, Any]:
lowercase : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowercase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowercase : Tuple = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__magic_name__ )
lowercase : List[Any] = """A red cat sitting on a park bench"""
lowercase : List[str] = np.random.RandomState(0 )
lowercase : str = pipe(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , guidance_scale=7.5 , num_inference_steps=10 , generator=__magic_name__ , output_type="""np""" , )
lowercase : Optional[Any] = output.images
lowercase : List[Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase : Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __snake_case ( self :int ) ->Optional[int]:
lowercase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowercase : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowercase : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
lowercase : Union[str, Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__magic_name__ )
lowercase : Union[str, Any] = """A red cat sitting on a park bench"""
lowercase : Tuple = np.random.RandomState(0 )
lowercase : Union[str, Any] = pipe(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , guidance_scale=7.5 , num_inference_steps=20 , generator=__magic_name__ , output_type="""np""" , )
lowercase : List[Any] = output.images
lowercase : Union[str, Any] = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
lowercase : Dict = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 348
| 0
|
'''simple docstring'''
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Generate a k_size x k_size Gaussian kernel centred on the middle pixel."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Blur a grayscale image by sliding a Gaussian kernel over it (im2col + dot product)."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
return dst
if __name__ == "__main__":
# read original image
    img = imread(r'../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
waitKey()
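    # Added illustration (the prints below are extra, not in the original script): because
    # the filter only uses fully valid k x k windows, the output shrinks by k_size - 1
    # pixels along each axis (dst_height = height - k_size + 1).
    print('3x3 output shape:', gaussian3x3.shape)  # (H - 2, W - 2)
    print('5x5 output shape:', gaussian5x5.shape)  # (H - 4, W - 4)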
| 620
|
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class _UpperCAmelCase ( nn.Module ):
__lowerCamelCase: int
__lowerCamelCase: jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : List[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : List[str] , a : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ , lowercase_ , lowercase_ : Optional[Any] = hidden_states.shape
lowercase_ : Tuple = jax.image.resize(
a , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
lowercase_ : List[Any] = self.conv(a )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
__lowerCamelCase: int
__lowerCamelCase: jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ : Any = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Any , a : int ):
'''simple docstring'''
lowercase_ : Any = self.conv(a )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
__lowerCamelCase: int
__lowerCamelCase: int = None
__lowerCamelCase: float = 0.0
__lowerCamelCase: bool = None
__lowerCamelCase: jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ : Union[str, Any] = self.in_channels if self.out_channels is None else self.out_channels
lowercase_ : Optional[int] = nn.GroupNorm(num_groups=3_2 , epsilon=1e-5 )
lowercase_ : Tuple = nn.Conv(
a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase_ : List[str] = nn.Dense(a , dtype=self.dtype )
lowercase_ : Optional[int] = nn.GroupNorm(num_groups=3_2 , epsilon=1e-5 )
lowercase_ : Any = nn.Dropout(self.dropout_prob )
lowercase_ : Dict = nn.Conv(
a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase_ : Tuple = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowercase_ : Optional[Any] = None
if use_nin_shortcut:
lowercase_ : Union[str, Any] = nn.Conv(
a , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self : List[str] , a : str , a : Dict , a : List[str]=True ):
'''simple docstring'''
lowercase_ : Dict = hidden_states
lowercase_ : int = self.norma(a )
lowercase_ : List[Any] = nn.swish(a )
lowercase_ : Dict = self.conva(a )
lowercase_ : Optional[int] = self.time_emb_proj(nn.swish(a ) )
lowercase_ : Tuple = jnp.expand_dims(jnp.expand_dims(a , 1 ) , 1 )
lowercase_ : List[str] = hidden_states + temb
lowercase_ : Optional[Any] = self.norma(a )
lowercase_ : Any = nn.swish(a )
lowercase_ : List[str] = self.dropout(a , a )
lowercase_ : int = self.conva(a )
if self.conv_shortcut is not None:
lowercase_ : List[str] = self.conv_shortcut(a )
return hidden_states + residual
| 620
| 1
|
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
A_ : int = logging.get_logger(__name__)
A_ : List[Any] = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
A_ : str = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
A_ : List[Any] = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
A_ : Dict = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
A_ : Optional[Any] = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
A_ : List[str] = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
A_ : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
A_ : List[str] = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
A_ : int = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
A_ : str = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
A_ : int = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
A_ : Dict = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
A_ : Optional[Any] = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
A_ : Any = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
A_ : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
A_ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
A_ : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
A_ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
A_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
A_ : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
A_ : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
A_ : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
A_ : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
A_ : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_MAPPING
A_ : List[str] = auto_class_update(FlaxAutoModel)
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
A_ : List[Any] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
A_ : Optional[int] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
A_ : int = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A_ : List[str] = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
A_ : int = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
A_ : Any = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
A_ : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
A_ : Union[str, Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
A_ : Tuple = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='''image classification'''
)
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
A_ : str = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')
class _lowercase ( _BaseAutoModelClass ):
_UpperCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
A_ : Dict = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
| 32
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 32
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all BART models at https://huggingface.co/models?filter=bart
__SCREAMING_SNAKE_CASE : Any = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
}
__SCREAMING_SNAKE_CASE : List[str] = {
"""facebook/bart-base""": 10_24,
"""facebook/bart-large""": 10_24,
"""facebook/bart-large-mnli""": 10_24,
"""facebook/bart-large-cnn""": 10_24,
"""facebook/bart-large-xsum""": 10_24,
"""yjernite/bart_eli5""": 10_24,
}
@lru_cache()
def bytes_to_unicode():
    """
    Map utf-8 byte values to printable unicode characters so the BPE merges can work on
    reversible unicode strings instead of raw bytes.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word represented as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
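# Added examples for the two helpers above (illustrative, not exhaustive):
# - bytes_to_unicode() maps every byte to a printable character; e.g. the space byte (32)
#   is remapped to "Ġ" (U+0120), which is why GPT-2/BART vocabularies show "Ġ" in front
#   of word-initial tokens.
# - get_pairs(("h", "e", "l", "l", "o")) returns
#   {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, the candidate merges that the BPE
#   loop below ranks against self.bpe_ranks.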
class __magic_name__ ( UpperCAmelCase_ ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ["""input_ids""", """attention_mask"""]
def __init__( self : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int="replace" , lowerCamelCase__ : List[str]="<s>" , lowerCamelCase__ : Any="</s>" , lowerCamelCase__ : Optional[Any]="</s>" , lowerCamelCase__ : Dict="<s>" , lowerCamelCase__ : List[Any]="<unk>" , lowerCamelCase__ : Any="<pad>" , lowerCamelCase__ : int="<mask>" , lowerCamelCase__ : Tuple=False , **lowerCamelCase__ : List[str] , ):
lowerCAmelCase : int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
lowerCAmelCase : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
lowerCAmelCase : int = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
lowerCAmelCase : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
lowerCAmelCase : str = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
lowerCAmelCase : str = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase : List[str] = json.load(__A )
lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
lowerCAmelCase : int = errors # how to handle errors in decoding
lowerCAmelCase : Any = bytes_to_unicode()
lowerCAmelCase : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase : Optional[Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowerCAmelCase : Optional[Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCAmelCase : List[str] = {}
lowerCAmelCase : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowerCAmelCase : Any = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
def _A ( self : Tuple ):
return len(self.encoder )
def _A ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _A ( self : Optional[int] , lowerCamelCase__ : List[Any] ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase : Union[str, Any] = tuple(__A )
lowerCAmelCase : Any = get_pairs(__A )
if not pairs:
return token
while True:
lowerCAmelCase : Optional[Any] = min(__A , key=lambda lowerCamelCase__ : self.bpe_ranks.get(__A , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase , lowerCAmelCase : List[Any] = bigram
lowerCAmelCase : Dict = []
lowerCAmelCase : int = 0
while i < len(__A ):
try:
lowerCAmelCase : List[Any] = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase : int = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase : str = tuple(__A )
lowerCAmelCase : int = new_word
if len(__A ) == 1:
break
else:
lowerCAmelCase : Optional[Any] = get_pairs(__A )
lowerCAmelCase : Any = ''' '''.join(__A )
lowerCAmelCase : Any = word
return word
def _A ( self : int , lowerCamelCase__ : Any ):
lowerCAmelCase : Union[str, Any] = []
for token in re.findall(self.pat , __A ):
lowerCAmelCase : Dict = ''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(''' ''' ) )
return bpe_tokens
def _A ( self : Optional[int] , lowerCamelCase__ : Optional[Any] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _A ( self : Dict , lowerCamelCase__ : List[Any] ):
return self.decoder.get(__A )
def _A ( self : List[str] , lowerCamelCase__ : Tuple ):
lowerCAmelCase : Optional[int] = ''''''.join(__A )
lowerCAmelCase : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def _A ( self : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase : List[str] = os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase : Tuple = os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(__A , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + '''\n''' )
lowerCAmelCase : Dict = 0
with open(__A , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase : int = token_index
writer.write(''' '''.join(__A ) + '''\n''' )
index += 1
return vocab_file, merge_file
def _A ( self : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase : str = [self.cls_token_id]
lowerCAmelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
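    # Added note: the method above follows the BART/RoBERTa special-token layout, i.e.
    #   single sequence:    <s> A </s>
    #   pair of sequences:  <s> A </s></s> B </s>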
def _A ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict = None , lowerCamelCase__ : Tuple = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _A ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple = None ):
lowerCAmelCase : Optional[int] = [self.sep_token_id]
lowerCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _A ( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Any=False , **lowerCamelCase__ : List[Any] ):
lowerCAmelCase : int = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
lowerCAmelCase : str = ''' ''' + text
return (text, kwargs)
| 348
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class UpperCAmelCase :
_A : Union[str, Any] = MBartConfig
_A : Tuple = {}
_A : Tuple = """gelu"""
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=20 , __A=2 , __A=1 , __A=0 , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = eos_token_id
__UpperCAmelCase = pad_token_id
__UpperCAmelCase = bos_token_id
def __lowerCamelCase ( self ):
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase = prepare_mbart_inputs_dict(__A , __A , __A )
return config, inputs_dict
def __lowerCamelCase ( self , __A , __A ):
__UpperCAmelCase = TFMBartModel(config=__A ).get_decoder()
__UpperCAmelCase = inputs_dict['input_ids']
__UpperCAmelCase = input_ids[:1, :]
__UpperCAmelCase = inputs_dict['attention_mask'][:1, :]
__UpperCAmelCase = inputs_dict['head_mask']
__UpperCAmelCase = 1
# first forward pass
__UpperCAmelCase = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
__UpperCAmelCase , __UpperCAmelCase = outputs.to_tuple()
__UpperCAmelCase = past_key_values[1]
def prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
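# Added note: the decoder attention mask built above always keeps the first decoder
# position visible (tf.ones over [:, :1]) so the forced start token is never masked,
# and masks the remaining positions wherever they equal config.pad_token_id.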
@require_tf
class UpperCAmelCase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
_A : int = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
_A : List[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
_A : str = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
_A : List[str] = True
_A : Optional[int] = False
_A : Optional[Any] = False
def __lowerCamelCase ( self , __A , __A , __A , __A , __A ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __lowerCamelCase ( self ):
__UpperCAmelCase = TFMBartModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__A )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase ( unittest.TestCase ):
_A : List[str] = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
_A : Optional[Any] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
_A : Optional[int] = """facebook/mbart-large-en-ro"""
@cached_property
def __lowerCamelCase ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCamelCase ( self ):
__UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCamelCase ( self , **__A ):
__UpperCAmelCase = self.translate_src_text(**__A )
self.assertListEqual(self.expected_text , __A )
def __lowerCamelCase ( self , **__A ):
__UpperCAmelCase = self.tokenizer(self.src_text , **__A , return_tensors='tf' )
__UpperCAmelCase = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__UpperCAmelCase = self.tokenizer.batch_decode(__A , skip_special_tokens=__A )
return generated_words
@slow
def __lowerCamelCase ( self ):
self._assert_generated_batch_equal_expected()
| 126
| 0
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_: Any = logging.get_logger(__name__)
# TODO Update this
lowercase_: Tuple = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase__ (__snake_case ):
"""simple docstring"""
__UpperCamelCase : int = 'esm'
def __init__( self : Tuple , __a : Dict=None , __a : Union[str, Any]=None , __a : List[str]=None , __a : Union[str, Any]=7_6_8 , __a : Union[str, Any]=1_2 , __a : List[Any]=1_2 , __a : List[str]=3_0_7_2 , __a : int=0.1 , __a : Tuple=0.1 , __a : int=1_0_2_6 , __a : Dict=0.02 , __a : Optional[int]=1e-12 , __a : Optional[int]="absolute" , __a : Optional[int]=True , __a : Union[str, Any]=None , __a : Union[str, Any]=False , __a : Union[str, Any]=False , __a : str=None , __a : List[str]=None , **__a : Optional[Any] , ):
super().__init__(pad_token_id=__a , mask_token_id=__a , **__a )
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : Dict = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : int = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : Any = hidden_dropout_prob
snake_case__ : Any = attention_probs_dropout_prob
snake_case__ : Optional[Any] = max_position_embeddings
snake_case__ : str = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : List[Any] = position_embedding_type
snake_case__ : Tuple = use_cache
snake_case__ : Optional[Any] = emb_layer_norm_before
snake_case__ : Dict = token_dropout
snake_case__ : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
snake_case__ : str = EsmFoldConfig()
elif isinstance(__a , __a ):
snake_case__ : str = EsmFoldConfig(**__a )
snake_case__ : List[str] = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
snake_case__ : Optional[int] = get_default_vocab_list()
else:
snake_case__ : List[str] = vocab_list
else:
snake_case__ : Dict = None
snake_case__ : str = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , __a ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def lowercase ( self : Dict ):
snake_case__ : List[Any] = super().to_dict()
if isinstance(self.esmfold_config , __a ):
snake_case__ : Tuple = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase__ :
"""simple docstring"""
__UpperCamelCase : str = None
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : bool = False
__UpperCamelCase : float = 0
__UpperCamelCase : bool = True
__UpperCamelCase : bool = False
__UpperCamelCase : int = 1_2_8
__UpperCamelCase : "TrunkConfig" = None
def lowercase ( self : Optional[Any] ):
if self.trunk is None:
snake_case__ : Dict = TrunkConfig()
elif isinstance(self.trunk , __a ):
snake_case__ : str = TrunkConfig(**self.trunk )
def lowercase ( self : Union[str, Any] ):
snake_case__ : int = asdict(self )
snake_case__ : List[str] = self.trunk.to_dict()
return output
@dataclass
class lowercase__ :
"""simple docstring"""
__UpperCamelCase : int = 4_8
__UpperCamelCase : int = 1_0_2_4
__UpperCamelCase : int = 1_2_8
__UpperCamelCase : int = 3_2
__UpperCamelCase : int = 3_2
__UpperCamelCase : int = 3_2
__UpperCamelCase : float = 0
__UpperCamelCase : float = 0
__UpperCamelCase : bool = False
__UpperCamelCase : int = 4
__UpperCamelCase : Optional[int] = 1_2_8
__UpperCamelCase : "StructureModuleConfig" = None
def lowercase ( self : Any ):
if self.structure_module is None:
snake_case__ : List[Any] = StructureModuleConfig()
elif isinstance(self.structure_module , __a ):
snake_case__ : int = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
f' {self.sequence_state_dim} and {self.sequence_state_dim}.' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' )
snake_case__ : int = self.sequence_state_dim // self.sequence_head_width
snake_case__ : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
if self.dropout >= 0.4:
raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' )
def lowercase ( self : Dict ):
snake_case__ : Optional[Any] = asdict(self )
snake_case__ : Any = self.structure_module.to_dict()
return output
@dataclass
class lowercase__ :
"""simple docstring"""
__UpperCamelCase : int = 3_8_4
__UpperCamelCase : int = 1_2_8
__UpperCamelCase : int = 1_6
__UpperCamelCase : int = 1_2_8
__UpperCamelCase : int = 1_2
__UpperCamelCase : int = 4
__UpperCamelCase : int = 8
__UpperCamelCase : float = 0.1
__UpperCamelCase : int = 8
__UpperCamelCase : int = 1
__UpperCamelCase : int = 2
__UpperCamelCase : int = 7
__UpperCamelCase : int = 1_0
__UpperCamelCase : float = 1E-8
__UpperCamelCase : float = 1E5
def lowercase ( self : Union[str, Any] ):
return asdict(self )
def _lowercase ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 127
|
def count_divisors(n):
    """Count the divisors of n from its prime factorisation."""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution():
    """Return the first triangle number with more than five hundred divisors."""
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
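# Worked example (added): 28 = 2**2 * 7, so count_divisors(28) multiplies the incremented
# exponents: (2 + 1) * (1 + 1) = 6, matching the divisors 1, 2, 4, 7, 14, 28. solution()
# applies this count to successive triangle numbers 1, 3, 6, 10, ... and stops at the
# first one with more than 500 divisors.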
| 127
| 1
|
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate the sine of an angle given in degrees with a truncated Maclaurin series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
if __name__ == "__main__":
__import__('''doctest''').testmod()
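    # Added sanity check (approximate by construction): the truncated Maclaurin series
    # x - x**3/3! + x**5/5! - ... should reproduce the exact values sin(30 deg) = 0.5 and
    # sin(90 deg) = 1.0 after the default rounding to 10 decimal places.
    print(sin(30))  # expected ~0.5
    print(sin(90))  # expected ~1.0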
| 105
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
UpperCamelCase__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase_ )
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
super().__init__(*snake_case__ ,**snake_case__ )
self.check_model_type(snake_case__ )
def snake_case ( self ,snake_case__=None ,snake_case__=None ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}, {}
if padding is not None:
SCREAMING_SNAKE_CASE_ : Any = padding
if truncation is not None:
SCREAMING_SNAKE_CASE_ : Tuple = truncation
if top_k is not None:
SCREAMING_SNAKE_CASE_ : int = top_k
return preprocess_params, {}, postprocess_params
def __call__( self ,snake_case__ ,snake_case__ = None ,**snake_case__ ):
if isinstance(snake_case__ ,(Image.Image, str) ) and isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'image': image, 'question': question}
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = image
SCREAMING_SNAKE_CASE_ : List[Any] = super().__call__(snake_case__ ,**snake_case__ )
return results
def snake_case ( self ,snake_case__ ,snake_case__=False ,snake_case__=False ):
SCREAMING_SNAKE_CASE_ : List[str] = load_image(inputs['image'] )
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer(
inputs['question'] ,return_tensors=self.framework ,padding=snake_case__ ,truncation=snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.image_processor(images=snake_case__ ,return_tensors=self.framework )
model_inputs.update(snake_case__ )
return model_inputs
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model(**snake_case__ )
return model_outputs
def snake_case ( self ,snake_case__ ,snake_case__=5 ):
if top_k > self.model.config.num_labels:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
SCREAMING_SNAKE_CASE_ : Any = model_outputs.logits.sigmoid()[0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = probs.topk(snake_case__ )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = scores.tolist()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(snake_case__ ,snake_case__ )]
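    # Added note: postprocess above yields the top_k answers as a list of
    # {"score": float, "answer": str} dicts, e.g. something like
    # [{"score": 0.98, "answer": "cat"}, {"score": 0.01, "answer": "dog"}]
    # (the values shown are illustrative only).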
| 105
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
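# Added note: with this lazy-module pattern, importing the package stays cheap -- the
# torch-backed classes such as UniSpeechModel are only resolved the first time the
# attribute is accessed, while the TYPE_CHECKING branch keeps static analyzers aware of
# the real symbols.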
| 709
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class UpperCamelCase_ ( unittest.TestCase ):
"""simple docstring"""
A = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
A = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase = TextaTextGenerationPipeline(model=UpperCAmelCase , tokenizer=UpperCAmelCase )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase = generator("""Something there""" )
self.assertEqual(UpperCAmelCase , [{"""generated_text""": ANY(UpperCAmelCase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
__lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
[{"""generated_text""": ANY(UpperCAmelCase )}, {"""generated_text""": ANY(UpperCAmelCase )}],
[{"""generated_text""": ANY(UpperCAmelCase )}, {"""generated_text""": ANY(UpperCAmelCase )}],
] , )
__lowerCamelCase = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
[{"""generated_text""": ANY(UpperCAmelCase )}, {"""generated_text""": ANY(UpperCAmelCase )}],
[{"""generated_text""": ANY(UpperCAmelCase )}, {"""generated_text""": ANY(UpperCAmelCase )}],
] , )
with self.assertRaises(UpperCAmelCase ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ):
__lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
__lowerCamelCase = generator("""Something there""" , do_sample=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , [{"""generated_text""": """"""}] )
__lowerCamelCase = 3
__lowerCamelCase = generator(
"""Something there""" , num_return_sequences=UpperCAmelCase , num_beams=UpperCAmelCase , )
__lowerCamelCase = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase = generator("""This is a test""" , do_sample=UpperCAmelCase , num_return_sequences=2 , return_tensors=UpperCAmelCase )
self.assertEqual(
UpperCAmelCase , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
__lowerCamelCase = generator.model.config.eos_token_id
__lowerCamelCase = """<pad>"""
__lowerCamelCase = generator(
["""This is a test""", """This is a second test"""] , do_sample=UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCAmelCase , )
self.assertEqual(
UpperCAmelCase , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ):
__lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
__lowerCamelCase = generator("""Something there""" , do_sample=UpperCAmelCase )
self.assertEqual(UpperCAmelCase , [{"""generated_text""": """"""}] )
| 571
| 0
|
'''simple docstring'''
def binary_recursive(decimal: int) -> str:
    """Convert a non-negative integer to its binary digits, recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """Validate the input and return its binary representation prefixed with 0b."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
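# Worked example (added): binary_recursive(11) unwinds as
#   divmod(11, 2) -> (5, 1), divmod(5, 2) -> (2, 1), divmod(2, 2) -> (1, 0)
# and concatenates "1" + "0" + "1" + "1" = "1011", so main("-11") returns "-0b1011".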
| 50
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any ) -> str:
_lowercase = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
_lowercase = 1024
_lowercase = 4096
_lowercase = 24
_lowercase = 16
_lowercase = [5, 11, 17, 23]
_lowercase = [256, 512, 1024, 1024]
_lowercase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = [256, 512, 768, 768]
_lowercase = 150
_lowercase = 16
_lowercase = (1, 384, 384)
_lowercase = False
_lowercase = 'project'
if "ade" in checkpoint_url:
_lowercase = True
_lowercase = 768
_lowercase = [1, 1, 1, 0.5]
_lowercase = 150
_lowercase = 16
_lowercase = 'huggingface/label-files'
_lowercase = 'ade20k-id2label.json'
_lowercase = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
_lowercase = {int(snake_case__ ): v for k, v in idalabel.items()}
_lowercase = idalabel
_lowercase = {v: k for k, v in idalabel.items()}
_lowercase = [1, 150, 480, 480]
return config, expected_shape
def SCREAMING_SNAKE_CASE__ ( snake_case__ :str ) -> str:
_lowercase = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[Any] ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowercase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
_lowercase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
_lowercase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
_lowercase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
_lowercase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
_lowercase = name.replace('proj' , 'projection' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
_lowercase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowercase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
_lowercase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
_lowercase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
_lowercase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
_lowercase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
_lowercase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
_lowercase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
_lowercase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
_lowercase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
_lowercase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowercase = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
_lowercase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
_lowercase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
_lowercase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
_lowercase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
_lowercase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowercase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowercase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
_lowercase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
_lowercase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
_lowercase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
_lowercase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
_lowercase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
_lowercase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
_lowercase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
_lowercase = name.replace('bn' , 'batch_norm' )
if "head" in name:
_lowercase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
_lowercase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
_lowercase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
_lowercase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
_lowercase = name.replace('..' , '.' )
if "stem.conv" in name:
_lowercase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
_lowercase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
_lowercase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
_lowercase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
_lowercase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
_lowercase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
_lowercase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] , snake_case__ :int ) -> Dict:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
_lowercase = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowercase = in_proj_weight[: config.hidden_size, :]
_lowercase = in_proj_bias[: config.hidden_size]
_lowercase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowercase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowercase = in_proj_weight[
-config.hidden_size :, :
]
_lowercase = in_proj_bias[-config.hidden_size :]
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
_lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :List[Any] , snake_case__ :str , snake_case__ :Any , snake_case__ :List[str] ) -> str:
_lowercase , _lowercase = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_lowercase = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
_lowercase = state_dict.pop(snake_case__ )
_lowercase = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
_lowercase = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
_lowercase = 480 if 'ade' in checkpoint_url else 384
_lowercase = DPTImageProcessor(size=snake_case__ )
_lowercase = prepare_img()
_lowercase = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
_lowercase = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
_lowercase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
snake_case = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
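# --- Illustrative sketch (toy sizes, not part of the conversion script above) ---
# The q/k/v read-in helper above slices a single fused (3 * hidden_size, hidden_size)
# attention projection (the timm layout noted in its first comment) into separate
# query / key / value weights. A minimal stand-alone version of that slicing,
# using an arbitrary toy hidden size of 8:
import torch

_toy_hidden = 8
_toy_qkv = torch.randn(3 * _toy_hidden, _toy_hidden)
_query = _toy_qkv[:_toy_hidden, :]
_key = _toy_qkv[_toy_hidden : 2 * _toy_hidden, :]
_value = _toy_qkv[-_toy_hidden:, :]
assert _query.shape == _key.shape == _value.shape == (_toy_hidden, _toy_hidden)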
| 67
| 0
|
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
A_ = logging.get_logger(__name__)
logging.set_verbosity_info()
def A_ ( snake_case , snake_case ):
if "xprophetnet" in prophetnet_checkpoint_path:
SCREAMING_SNAKE_CASE:Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case )
SCREAMING_SNAKE_CASE:int = XLMProphetNetForConditionalGeneration.from_pretrained(
snake_case , output_loading_info=snake_case )
else:
SCREAMING_SNAKE_CASE:Optional[int] = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case )
SCREAMING_SNAKE_CASE:Any = ProphetNetForConditionalGeneration.from_pretrained(
snake_case , output_loading_info=snake_case )
SCREAMING_SNAKE_CASE:Optional[Any] = ["key_proj", "value_proj", "query_proj"]
SCREAMING_SNAKE_CASE:Dict = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
SCREAMING_SNAKE_CASE:List[str] = key.split("." )
if attributes[0] == "lm_head":
SCREAMING_SNAKE_CASE:Union[str, Any] = prophet
SCREAMING_SNAKE_CASE:str = prophet_old
else:
SCREAMING_SNAKE_CASE:Optional[Any] = prophet.prophetnet
SCREAMING_SNAKE_CASE:Optional[int] = prophet_old.model
SCREAMING_SNAKE_CASE:Optional[int] = False
for attribute in attributes:
if attribute in mapping:
SCREAMING_SNAKE_CASE:Optional[Any] = mapping[attribute]
if not hasattr(snake_case , snake_case ) and len(snake_case ) > 0:
SCREAMING_SNAKE_CASE:Tuple = attribute
elif hasattr(snake_case , snake_case ):
SCREAMING_SNAKE_CASE:Tuple = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE:str = old_model.weight
logger.info(F'''{attribute} is initialized.''' )
SCREAMING_SNAKE_CASE:int = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE:Optional[Any] = old_model.bias
logger.info(F'''{attribute} is initialized''' )
SCREAMING_SNAKE_CASE:Dict = True
break
elif attribute in special_keys and hasattr(snake_case , "in_proj_weight" ):
SCREAMING_SNAKE_CASE:str = old_model.in_proj_weight.shape[0] // 3
SCREAMING_SNAKE_CASE:str = getattr(snake_case , snake_case )
                    assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                    assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
SCREAMING_SNAKE_CASE:Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
SCREAMING_SNAKE_CASE:Union[str, Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
SCREAMING_SNAKE_CASE:List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
SCREAMING_SNAKE_CASE:Union[str, Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
SCREAMING_SNAKE_CASE:int = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
SCREAMING_SNAKE_CASE:List[Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
SCREAMING_SNAKE_CASE:Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
SCREAMING_SNAKE_CASE:int = nn.Parameter(old_model.embed_positions.weight[:512, :] )
SCREAMING_SNAKE_CASE:Optional[int] = True
break
if attribute.isdigit():
                SCREAMING_SNAKE_CASE:List[str] = model[int(attribute )]
                SCREAMING_SNAKE_CASE:Union[str, Any] = old_model[int(attribute )]
else:
SCREAMING_SNAKE_CASE:Dict = getattr(snake_case , snake_case )
if old_attribute == "":
SCREAMING_SNAKE_CASE:Tuple = old_model
else:
if not hasattr(snake_case , snake_case ):
raise ValueError(F'''{old_model} does not have {old_attribute}''' )
SCREAMING_SNAKE_CASE:List[Any] = getattr(snake_case , snake_case )
if not is_key_init:
raise ValueError(F'''{key} was not correctly initialized!''' )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(snake_case )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
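# --- Illustrative sketch (toy classes, independent of the conversion above) ---
# The conversion loop above walks each dotted parameter name one attribute at a
# time, treating purely numeric parts as list / ModuleList indices
# (e.g. "layers.1.weight"). A minimal stand-alone version of that lookup:
class _ToyLayer:
    def __init__(self):
        self.weight = "toy-weight"

class _ToyModel:
    def __init__(self):
        self.layers = [_ToyLayer(), _ToyLayer()]

def _resolve(obj, dotted_name):
    for part in dotted_name.split("."):
        obj = obj[int(part)] if part.isdigit() else getattr(obj, part)
    return obj

assert _resolve(_ToyModel(), "layers.1.weight") == "toy-weight"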
| 714
|
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and a second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
A_ = float("nan")
class _snake_case :
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[int] ):
SCREAMING_SNAKE_CASE:Union[str, Any] = sys.stdout
SCREAMING_SNAKE_CASE:List[Any] = open(SCREAMING_SNAKE_CASE__ ,"a" )
def __getattr__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return getattr(self.stdout ,SCREAMING_SNAKE_CASE__ )
def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ):
self.stdout.write(SCREAMING_SNAKE_CASE__ )
# strip tqdm codes
self.file.write(re.sub(R"^.*\r" ,"" ,SCREAMING_SNAKE_CASE__ ,0 ,re.M ) )
def A_ ( snake_case=80 , snake_case=False ):
SCREAMING_SNAKE_CASE:Tuple = []
# deal with critical env vars
SCREAMING_SNAKE_CASE:Optional[int] = ["CUDA_VISIBLE_DEVICES"]
for key in env_keys:
        SCREAMING_SNAKE_CASE:Any = os.environ.get(key , None )
if val is not None:
cmd.append(F'''{key}={val}''' )
# python executable (not always needed if the script is executable)
SCREAMING_SNAKE_CASE:Optional[int] = sys.executable if full_python_path else sys.executable.split("/" )[-1]
cmd.append(snake_case )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
SCREAMING_SNAKE_CASE:str = []
SCREAMING_SNAKE_CASE:str = ""
while len(snake_case ) > 0:
current_line += F'''{cmd.pop(0 )} '''
if len(snake_case ) == 0 or len(snake_case ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(snake_case )
SCREAMING_SNAKE_CASE:Tuple = ""
return "\\\n".join(snake_case )
def A_ ( snake_case , snake_case ):
# unwrap multi-line input
SCREAMING_SNAKE_CASE:List[Any] = re.sub(r"[\\\n]+" , " " , args.base_cmd )
# remove --output_dir if any and set our own
SCREAMING_SNAKE_CASE:int = re.sub("--output_dir\s+[^\s]+" , "" , args.base_cmd )
args.base_cmd += F''' --output_dir {output_dir}'''
# ensure we have --overwrite_output_dir
SCREAMING_SNAKE_CASE:Union[str, Any] = re.sub("--overwrite_output_dir\s+" , "" , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def A_ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ):
# Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once
# everybody is happy with the output
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222] )} , )
SCREAMING_SNAKE_CASE:Union[str, Any] = subprocess.run(snake_case , capture_output=snake_case , text=snake_case )
if verbose:
print("STDOUT" , result.stdout )
print("STDERR" , result.stderr )
# save the streams
SCREAMING_SNAKE_CASE:Optional[Any] = variation.replace(" " , "-" )
with open(Path(snake_case ) / F'''log.{prefix}.stdout.txt''' , "w" ) as f:
f.write(result.stdout )
with open(Path(snake_case ) / F'''log.{prefix}.stderr.txt''' , "w" ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print("failed" )
return {target_metric_key: nan}
with io.open(F'''{output_dir}/all_results.json''' , "r" , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE:List[str] = json.load(snake_case )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def A_ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ):
SCREAMING_SNAKE_CASE:str = []
SCREAMING_SNAKE_CASE:List[str] = []
SCREAMING_SNAKE_CASE:Any = F'''{id}: {variation:<{longest_variation_len}}'''
SCREAMING_SNAKE_CASE:Tuple = F'''{preamble}: '''
SCREAMING_SNAKE_CASE:Any = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(snake_case ) , desc=snake_case , leave=snake_case ):
SCREAMING_SNAKE_CASE:Dict = process_run_single(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
SCREAMING_SNAKE_CASE:Tuple = single_run_metrics[target_metric_key]
if not math.isnan(snake_case ):
metrics.append(snake_case )
results.append(snake_case )
outcome += "✓"
else:
outcome += "✘"
SCREAMING_SNAKE_CASE:Union[str, Any] = F'''\33[2K\r{outcome}'''
if len(snake_case ) > 0:
SCREAMING_SNAKE_CASE:Dict = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
SCREAMING_SNAKE_CASE:Union[str, Any] = round(mean_metrics[target_metric_key] , 2 )
SCREAMING_SNAKE_CASE:int = F'''{outcome} {mean_target}'''
if len(snake_case ) > 1:
            results_str += F''' {tuple(round(x , 2 ) for x in results )}'''
print(snake_case )
SCREAMING_SNAKE_CASE:List[str] = variation
return mean_metrics
else:
print(snake_case )
return {variation_key: variation, target_metric_key: nan}
def A_ ( ):
SCREAMING_SNAKE_CASE:int = torch.cuda.get_device_properties(torch.device("cuda" ) )
return F'''
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
'''
def A_ ( snake_case , snake_case , snake_case , snake_case , snake_case ):
SCREAMING_SNAKE_CASE:Optional[Any] = pd.DataFrame(snake_case )
SCREAMING_SNAKE_CASE:str = "variation"
SCREAMING_SNAKE_CASE:str = "diff_%"
SCREAMING_SNAKE_CASE:Union[str, Any] = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
SCREAMING_SNAKE_CASE:Any = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(snake_case ):
# as a fallback, use the minimal value as the sentinel
SCREAMING_SNAKE_CASE:Any = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(snake_case ):
SCREAMING_SNAKE_CASE:Tuple = df.apply(
            lambda r : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis="columns" , )
# re-order columns
SCREAMING_SNAKE_CASE:Tuple = [variation_key, target_metric_key, diff_key, *report_metric_keys]
SCREAMING_SNAKE_CASE:Union[str, Any] = df.reindex(snake_case , axis="columns" ) # reorder cols
# capitalize
SCREAMING_SNAKE_CASE:str = df.rename(str.capitalize , axis="columns" )
# make the cols as narrow as possible
SCREAMING_SNAKE_CASE:int = df.rename(lambda snake_case : c.replace("_" , "<br>" ) , axis="columns" )
SCREAMING_SNAKE_CASE:Dict = df.rename(lambda snake_case : c.replace("_" , "\n" ) , axis="columns" )
SCREAMING_SNAKE_CASE:List[Any] = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=snake_case , floatfmt=".2f" )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=snake_case , floatfmt=".2f" )]
print("\n\n".join(snake_case ) )
def A_ ( ):
SCREAMING_SNAKE_CASE:Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"--base-cmd" , default=snake_case , type=snake_case , required=snake_case , help="Base cmd" , )
parser.add_argument(
"--variations" , default=snake_case , type=snake_case , nargs="+" , required=snake_case , help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'" , )
parser.add_argument(
"--base-variation" , default=snake_case , type=snake_case , help="Baseline variation to compare to. if None the minimal target value will be used to compare against" , )
parser.add_argument(
"--target-metric-key" , default=snake_case , type=snake_case , required=snake_case , help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second" , )
parser.add_argument(
"--report-metric-keys" , default="" , type=snake_case , help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples" , )
parser.add_argument(
"--repeat-times" , default=1 , type=snake_case , help="How many times to re-run each variation - an average will be reported" , )
parser.add_argument(
"--output_dir" , default="output_benchmark" , type=snake_case , help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked" , )
parser.add_argument(
"--verbose" , default=snake_case , action="store_true" , help="Whether to show the outputs of each run or just the benchmark progress" , )
SCREAMING_SNAKE_CASE:int = parser.parse_args()
SCREAMING_SNAKE_CASE:int = args.output_dir
Path(snake_case ).mkdir(exist_ok=snake_case )
SCREAMING_SNAKE_CASE:Optional[int] = get_base_command(snake_case , snake_case )
# split each dimension into its --foo variations
    SCREAMING_SNAKE_CASE:Union[str, Any] = [list(map(str.strip , re.split(r"\|" , x ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
SCREAMING_SNAKE_CASE:str = list(map(str.strip , map(" ".join , itertools.product(*snake_case ) ) ) )
    SCREAMING_SNAKE_CASE:Dict = max(len(x ) for x in variations )
# split wanted keys
SCREAMING_SNAKE_CASE:Any = args.report_metric_keys.split()
# capture prints into a log file for convenience
SCREAMING_SNAKE_CASE:Any = F'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
print(F'''and this script\'s output is also piped into {report_fn}''' )
SCREAMING_SNAKE_CASE:Any = Tee(snake_case )
print(F'''\n*** Running {len(snake_case )} benchmarks:''' )
print(F'''Base command: {" ".join(snake_case )}''' )
SCREAMING_SNAKE_CASE:str = "variation"
SCREAMING_SNAKE_CASE:Tuple = []
for id, variation in enumerate(tqdm(snake_case , desc="Total completion: " , leave=snake_case ) ):
SCREAMING_SNAKE_CASE:List[str] = base_cmd + variation.split()
results.append(
process_run(
id + 1 , snake_case , snake_case , snake_case , snake_case , args.target_metric_key , snake_case , args.repeat_times , snake_case , args.verbose , ) )
process_results(snake_case , args.target_metric_key , snake_case , args.base_variation , snake_case )
if __name__ == "__main__":
main()
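# --- Illustrative sketch (example values only, not executed by the benchmark) ---
# How the --variations dimensions are expanded above: split each dimension on
# "|", take the cartesian product, then join and strip, so an empty variation
# contributes nothing to the final command suffix.
import itertools

_dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
_expanded = [" ".join(combo).strip() for combo in itertools.product(*_dims)]
assert _expanded == [
    "--tf32 0", "--tf32 0 --fp16", "--tf32 0 --bf16",
    "--tf32 1", "--tf32 1 --fp16", "--tf32 1 --bf16",
]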
| 465
| 0
|
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = sd_pipe.prepare_inputs(UpperCamelCase_ )
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = '''stabilityai/stable-diffusion-2'''
lowercase__ , lowercase__ = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCamelCase_ , subfolder='''scheduler''' )
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained(
UpperCamelCase_ , scheduler=UpperCamelCase_ , revision='''bf16''' , dtype=jnp.bfloataa , )
lowercase__ = scheduler_params
lowercase__ = '''A painting of a squirrel eating a burger'''
lowercase__ = jax.device_count()
lowercase__ = num_samples * [prompt]
lowercase__ = sd_pipe.prepare_inputs(UpperCamelCase_ )
lowercase__ = replicate(UpperCamelCase_ )
lowercase__ = shard(UpperCamelCase_ )
lowercase__ = jax.random.PRNGKey(0 )
lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
lowercase__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowercase__ = images[0, 253:256, 253:256, -1]
lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowercase__ = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
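# --- Illustrative sketch (toy shapes, independent of the tests above) ---
# The tests reshape the pmap output of shape
# (num_devices, batch_per_device, height, width, channels) into a flat
# (num_devices * batch_per_device, height, width, channels) batch before
# slicing out the pixel window that is compared against the expected values.
import numpy as np

_toy_images = np.zeros((2, 1, 4, 4, 3))
_flat = _toy_images.reshape((_toy_images.shape[0] * _toy_images.shape[1],) + _toy_images.shape[-3:])
assert _flat.shape == (2, 4, 4, 3)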
| 43
|
'''simple docstring'''
def lowerCamelCase ( __lowerCamelCase : str ) ->bool:
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def lowerCamelCase ( __lowerCamelCase : str ) ->bool:
_SCREAMING_SNAKE_CASE = credit_card_number
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = len(__lowerCamelCase ) - 2
for i in range(__lowerCamelCase , -1 , -2 ):
# double the value of every second digit
_SCREAMING_SNAKE_CASE = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
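            # for doubled values 10..18 this is the same as subtracting 9 (e.g. 12 -> 3)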
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(__lowerCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def lowerCamelCase ( __lowerCamelCase : str ) ->bool:
_SCREAMING_SNAKE_CASE = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(__lowerCamelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(__lowerCamelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(__lowerCamelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 314
| 0
|
import math
def lowercase ( _a ,_a = 0 ,_a = 0 ) -> list:
UpperCAmelCase_: List[Any] = end or len(_a )
for i in range(_a ,_a ):
UpperCAmelCase_: int = i
UpperCAmelCase_: Union[str, Any] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
UpperCAmelCase_: Tuple = array[temp_index - 1]
temp_index -= 1
UpperCAmelCase_: int = temp_index_value
return array
def lowercase ( _a ,_a ,_a ) -> None: # Max Heap
UpperCAmelCase_: Union[str, Any] = index
UpperCAmelCase_: str = 2 * index + 1 # Left Node
UpperCAmelCase_: Optional[int] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
UpperCAmelCase_: Dict = left_index
if right_index < heap_size and array[largest] < array[right_index]:
UpperCAmelCase_: Optional[int] = right_index
if largest != index:
        array[index], array[largest] = array[largest], array[index]
heapify(_a ,_a ,_a )
def lowercase ( _a ) -> list:
UpperCAmelCase_: Optional[int] = len(_a )
for i in range(n // 2 ,-1 ,-1 ):
heapify(_a ,_a ,_a )
for i in range(n - 1 ,0 ,-1 ):
        array[0], array[i] = array[i], array[0]
heapify(_a ,0 ,_a )
return array
def lowercase ( _a ,_a ,_a ,_a ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def lowercase ( _a ,_a ,_a ,_a ) -> int:
UpperCAmelCase_: Tuple = low
UpperCAmelCase_: Optional[int] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
        array[i], array[j] = array[j], array[i]
i += 1
def lowercase ( _a ) -> list:
if len(_a ) == 0:
return array
UpperCAmelCase_: Dict = 2 * math.ceil(math.loga(len(_a ) ) )
UpperCAmelCase_: Dict = 16
return intro_sort(_a ,0 ,len(_a ) ,_a ,_a )
def lowercase ( _a ,_a ,_a ,_a ,_a ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(_a )
max_depth -= 1
UpperCAmelCase_: List[str] = median_of_a(_a ,_a ,start + ((end - start) // 2) + 1 ,end - 1 )
UpperCAmelCase_: int = partition(_a ,_a ,_a ,_a )
intro_sort(_a ,_a ,_a ,_a ,_a )
UpperCAmelCase_: Optional[int] = p
return insertion_sort(_a ,_a ,_a )
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase = input("""Enter numbers separated by a comma : """).strip()
_lowerCAmelCase = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 714
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowercase ( _a ) -> Dict:
UpperCAmelCase_: Any = set()
UpperCAmelCase_: Optional[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase_: List[str] = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1024}
class UpperCAmelCase__ ( snake_case__ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self , A__ , A__="<s>" , A__="<pad>" , A__="</s>" , A__="<unk>" , A__=False , A__=None , **A__ , ):
"""simple docstring"""
super().__init__(
unk_token=A__ , bos_token=A__ , eos_token=A__ , pad_token=A__ , do_lower_case=A__ , **A__ , )
UpperCAmelCase_: str = do_lower_case
with open(A__ , encoding="utf-8" ) as vocab_handle:
UpperCAmelCase_: str = json.load(A__ )
UpperCAmelCase_: List[str] = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
UpperCAmelCase_: List[Any] = None
UpperCAmelCase_: Optional[int] = None
else:
with open(A__ , encoding="utf-8" ) as merges_handle:
UpperCAmelCase_: List[Any] = merges_handle.read().split("\n" )[:-1]
UpperCAmelCase_: List[Any] = [tuple(merge.split()[:2] ) for merge in merges]
UpperCAmelCase_: Union[str, Any] = dict(zip(A__ , range(len(A__ ) ) ) )
UpperCAmelCase_: Dict = {}
@property
def snake_case_ ( self ):
"""simple docstring"""
return len(self.decoder )
def snake_case_ ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
UpperCAmelCase_: Any = get_pairs(A__ )
if not pairs:
return token
while True:
UpperCAmelCase_: List[str] = min(A__ , key=lambda A__ : self.bpe_ranks.get(A__ , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase_ , UpperCAmelCase_: Tuple = bigram
UpperCAmelCase_: Optional[Any] = []
UpperCAmelCase_: Optional[int] = 0
while i < len(A__ ):
try:
UpperCAmelCase_: str = word.index(A__ , A__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase_: str = j
if word[i] == first and i < len(A__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase_: str = tuple(A__ )
UpperCAmelCase_: str = new_word
if len(A__ ) == 1:
break
else:
UpperCAmelCase_: Optional[Any] = get_pairs(A__ )
UpperCAmelCase_: str = " ".join(A__ )
if word == "\n " + BPE_TOKEN_MERGES:
UpperCAmelCase_: Tuple = "\n" + BPE_TOKEN_MERGES
if word.endswith(A__ ):
UpperCAmelCase_: Union[str, Any] = word.replace(A__ , "" )
UpperCAmelCase_: Dict = word.replace(" " , A__ )
UpperCAmelCase_: List[Any] = word
return word
def snake_case_ ( self , A__ ):
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"This tokenizer was instantiated without a `merges.txt` file, so"
" that it can only be used for decoding, not for encoding."
"Make sure to provide `merges.txt` file at instantiation to enable "
"encoding." )
if self.do_lower_case:
UpperCAmelCase_: int = text.lower()
UpperCAmelCase_: Optional[Any] = text.split()
UpperCAmelCase_: Tuple = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(A__ ).split(" " ) ) )
return split_tokens
def snake_case_ ( self , A__ ):
"""simple docstring"""
return self.encoder.get(A__ , self.encoder.get(self.unk_token ) )
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = self.decoder.get(A__ , self.unk_token )
return result
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: List[str] = " ".join(A__ )
# make sure @@ tokens are concatenated
UpperCAmelCase_: List[Any] = "".join(string.split(A__ ) )
return string
def snake_case_ ( self , A__ , A__ = None ):
"""simple docstring"""
if not os.path.isdir(A__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase_: Tuple = os.path.join(
A__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_: Optional[Any] = os.path.join(
A__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(A__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A__ , ensure_ascii=A__ ) + "\n" )
UpperCAmelCase_: str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(A__ , "w" , encoding="utf-8" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!" )
UpperCAmelCase_: Optional[Any] = token_index
writer.write(" ".join(A__ ) + "\n" )
index += 1
return (vocab_file, merges_file)
| 306
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a__ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __snake_case :
__lowerCAmelCase = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__lowerCAmelCase = field(
default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} )
__lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__lowerCAmelCase = field(default=__magic_name__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class __snake_case :
__lowerCAmelCase = field(
metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} )
__lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , )
__lowerCAmelCase = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__lowerCAmelCase = field(
default=__magic_name__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def __lowerCamelCase ( ) ->int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__ , snake_case__ , snake_case__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__ , snake_case__ , snake_case__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
snake_case__ = import_module('tasks' )
try:
snake_case__ = getattr(UpperCAmelCase_ , model_args.task_type )
snake_case__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , UpperCAmelCase_ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
snake_case__ = token_classification_task.get_labels(data_args.labels )
snake_case__ = dict(enumerate(UpperCAmelCase_ ) )
snake_case__ = len(UpperCAmelCase_ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase_ , idalabel=UpperCAmelCase_ , labelaid={label: i for i, label in enumerate(UpperCAmelCase_ )} , cache_dir=model_args.cache_dir , )
snake_case__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
snake_case__ = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , )
# Get datasets
snake_case__ = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase_ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase_ , labels=UpperCAmelCase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
snake_case__ = (
TokenClassificationDataset(
token_classification_task=UpperCAmelCase_ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase_ , labels=UpperCAmelCase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(UpperCAmelCase_ , UpperCAmelCase_ ) -> Tuple[List[int], List[int]]:
snake_case__ = np.argmax(UpperCAmelCase_ , axis=2 )
snake_case__ , snake_case__ = preds.shape
snake_case__ = [[] for _ in range(UpperCAmelCase_ )]
snake_case__ = [[] for _ in range(UpperCAmelCase_ )]
for i in range(UpperCAmelCase_ ):
for j in range(UpperCAmelCase_ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(UpperCAmelCase_ ) -> Dict:
snake_case__ , snake_case__ = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCAmelCase_ , UpperCAmelCase_ ),
"precision": precision_score(UpperCAmelCase_ , UpperCAmelCase_ ),
"recall": recall_score(UpperCAmelCase_ , UpperCAmelCase_ ),
"f1": fa_score(UpperCAmelCase_ , UpperCAmelCase_ ),
}
# Data collator
snake_case__ = DataCollatorWithPadding(UpperCAmelCase_ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
snake_case__ = Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , compute_metrics=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case__ = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
snake_case__ = trainer.evaluate()
snake_case__ = os.path.join(training_args.output_dir , 'eval_results.txt' )
if trainer.is_world_process_zero():
with open(UpperCAmelCase_ , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(' %s = %s' , UpperCAmelCase_ , UpperCAmelCase_ )
writer.write('%s = %s\n' % (key, value) )
results.update(UpperCAmelCase_ )
# Predict
if training_args.do_predict:
snake_case__ = TokenClassificationDataset(
token_classification_task=UpperCAmelCase_ , data_dir=data_args.data_dir , tokenizer=UpperCAmelCase_ , labels=UpperCAmelCase_ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
snake_case__ , snake_case__ , snake_case__ = trainer.predict(UpperCAmelCase_ )
snake_case__ , snake_case__ = align_predictions(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case__ = os.path.join(training_args.output_dir , 'test_results.txt' )
if trainer.is_world_process_zero():
with open(UpperCAmelCase_ , 'w' ) as writer:
for key, value in metrics.items():
logger.info(' %s = %s' , UpperCAmelCase_ , UpperCAmelCase_ )
writer.write('%s = %s\n' % (key, value) )
# Save predictions
snake_case__ = os.path.join(training_args.output_dir , 'test_predictions.txt' )
if trainer.is_world_process_zero():
with open(UpperCAmelCase_ , 'w' ) as writer:
with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f:
token_classification_task.write_predictions_to_file(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return results
def __lowerCamelCase ( UpperCAmelCase_ ) ->Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
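# --- Illustrative sketch (toy arrays, not used by the training script above) ---
# align_predictions takes the argmax over the label dimension and drops every
# position whose gold label equals the CrossEntropyLoss ignore_index (-100),
# so predictions and references stay aligned token by token:
import numpy as np

_toy_label_map = {0: "O", 1: "B-PER"}
_toy_logits = np.array([[[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]]])  # (batch, seq, num_labels)
_toy_gold = np.array([[0, 1, -100]])  # -100 marks an ignored (padded) position
_toy_preds = np.argmax(_toy_logits, axis=2)
_toy_pred_list = [
    [_toy_label_map[p] for p, g in zip(row_p, row_g) if g != -100]
    for row_p, row_g in zip(_toy_preds, _toy_gold)
]
assert _toy_pred_list == [["O", "B-PER"]]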
| 368
|
'''simple docstring'''
from math import loga
def __lowerCamelCase ( UpperCAmelCase_ ) ->int:
if a < 0:
raise ValueError('Input value must be a positive integer' )
    elif not isinstance(UpperCAmelCase_ , int ):
raise TypeError('Input value must be a \'int\' type' )
return 0 if (a == 0) else int(loga(a & -a ) )
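# Note: `a & -a` isolates the lowest set bit (e.g. 12 & -12 == 4), so the result is
# the zero-based position of that bit (log2(4) == 2 for a == 12).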
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 498
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCAmelCase__ : int = 1_0_0_0 ) ->int:
A__ : Optional[int] = 2**power
A__ : Dict = 0
while n:
A__ , A__ : Tuple = r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 498
| 1
|
def UpperCamelCase ( snake_case__ : list[int] , snake_case__ : str ) -> list[int]:
UpperCamelCase : int = int(snake_case__ )
# Initialize Result
UpperCamelCase : List[Any] = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
answer.append(snake_case__ ) # Append the "answers" array
return answer
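# e.g. denominations [1, 2, 5, 10] and value 18 yield [10, 5, 2, 1]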
# Driver Code
if __name__ == "__main__":
__UpperCAmelCase = []
__UpperCAmelCase = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
__UpperCAmelCase = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
__UpperCAmelCase = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
__UpperCAmelCase = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
__UpperCAmelCase = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
__UpperCAmelCase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''')
| 40
|
import math
def __lowerCAmelCase ( _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = int(math.sqrt(_UpperCamelCase ) ) # Size of every segment
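    # primes up to sqrt(n) are enough to cross out every composite in the later segments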
SCREAMING_SNAKE_CASE = [True] * (end + 1)
SCREAMING_SNAKE_CASE = []
while start <= end:
if temp[start] is True:
in_prime.append(_UpperCamelCase )
for i in range(start * start , end + 1 , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = False
start += 1
prime += in_prime
SCREAMING_SNAKE_CASE = end + 1
SCREAMING_SNAKE_CASE = min(2 * end , _UpperCamelCase )
while low <= n:
SCREAMING_SNAKE_CASE = [True] * (high - low + 1)
for each in in_prime:
SCREAMING_SNAKE_CASE = math.floor(low / each ) * each
if t < low:
t += each
for j in range(_UpperCamelCase , high + 1 , _UpperCamelCase ):
SCREAMING_SNAKE_CASE = False
for j in range(len(_UpperCamelCase ) ):
if temp[j] is True:
prime.append(j + low )
SCREAMING_SNAKE_CASE = high + 1
SCREAMING_SNAKE_CASE = min(high + end , _UpperCamelCase )
return prime
print(sieve(10**6))
| 439
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {'vocab_file': 'spm_char.model'}
UpperCAmelCase = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
UpperCAmelCase = {
'microsoft/speecht5_asr': 1024,
'microsoft/speecht5_tts': 1024,
'microsoft/speecht5_vc': 1024,
}
class SpeechT5Tokenizer( PreTrainedTokenizer ):
    """Constructs a SpeechT5 tokenizer backed by a character-level SentencePiece model."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs=None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict[str, Any]:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index: int) -> str:
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
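# Usage sketch (added illustration, not part of the original module; it assumes the
# "microsoft/speecht5_tts" checkpoint above can be downloaded):
#   from transformers import SpeechT5Tokenizer
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   input_ids = tokenizer("Hello world", return_tensors="pt").input_ids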
| 344
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: the speed of sound in a fluid is sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
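# Illustrative values (added note, approximate figures assumed for this example):
# water at ~20 °C has bulk_modulus ≈ 2.15e9 Pa and density ≈ 998 kg/m^3, so
# speed_of_sound_in_a_fluid(998, 2.15e9) ≈ (2.15e9 / 998) ** 0.5 ≈ 1.47e3 m/s.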
| 344
| 1
|
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCAmelCase__ = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
UpperCAmelCase__ = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
UpperCAmelCase__ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase_ ( self : str ) -> int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Tuple=None ) -> List[str]:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(lowerCamelCase__ , lowerCamelCase__ , sample_weight=lowerCamelCase__ ) ),
}
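# Added note: for the binary case the coefficient reduces to the closed form
# MCC = (TP * TN - FP * FN) / sqrt((TP + FP)(TP + FN)(TN + FP)(TN + FN)),
# which sklearn's matthews_corrcoef generalizes to the multiclass setting.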
| 332
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Optional[int]=8 , lowerCamelCase__ : Any=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Any=99 , lowerCamelCase__ : int=16 , lowerCamelCase__ : Tuple=5 , lowerCamelCase__ : int=2 , lowerCamelCase__ : Optional[int]=36 , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Union[str, Any]=512 , lowerCamelCase__ : Tuple=16 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : Optional[int]=4 , lowerCamelCase__ : Optional[Any]=None , ) -> int:
"""simple docstring"""
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_input_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = num_choices
__lowercase = scope
def UpperCAmelCase_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : int ) -> int:
"""simple docstring"""
__lowercase = self.get_config()
__lowercase = 300
return config
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = self.prepare_config_and_inputs()
__lowercase = True
__lowercase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase_ ( self : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) -> List[Any]:
"""simple docstring"""
__lowercase = MraModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
__lowercase = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
__lowercase = model(lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , ) -> str:
"""simple docstring"""
__lowercase = True
__lowercase = MraModel(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , )
__lowercase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , )
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = MraForMaskedLM(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str ) -> Dict:
"""simple docstring"""
__lowercase = MraForQuestionAnswering(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict ) -> str:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MraForSequenceClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any ) -> List[str]:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = MraForTokenClassification(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = MraForMultipleChoice(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowercase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase = model(
lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Dict = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict = False
UpperCamelCase_ : List[str] = False
UpperCamelCase_ : Optional[int] = False
UpperCamelCase_ : str = False
UpperCamelCase_ : int = ()
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = MraModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def UpperCAmelCase_ ( self : Any ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase = type
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> Any:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : str ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ )
@slow
def UpperCAmelCase_ ( self : Any ) -> Dict:
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = MraModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
@unittest.skip(reason='''MRA does not output attentions''' )
def UpperCAmelCase_ ( self : Dict ) -> Any:
"""simple docstring"""
return
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowercase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowercase = model(lowerCamelCase__ )[0]
__lowercase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , lowerCamelCase__ )
__lowercase = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowercase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowercase = model(lowerCamelCase__ )[0]
__lowercase = 50_265
__lowercase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , lowerCamelCase__ )
__lowercase = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
@slow
def UpperCAmelCase_ ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
__lowercase = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
__lowercase = model(lowerCamelCase__ )[0]
__lowercase = 50_265
__lowercase = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , lowerCamelCase__ )
__lowercase = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 332
| 1
|
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char ( cp : int ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X20000 and cp <= 0X2a6df) #
or (cp >= 0X2a700 and cp <= 0X2b73f) #
or (cp >= 0X2b740 and cp <= 0X2b81f) #
or (cp >= 0X2b820 and cp <= 0X2ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2f800 and cp <= 0X2fa1f) #
): #
return True
return False
def is_chinese ( word : str ):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char )
        if not _is_chinese_char(cp ):
            return 0
    return 1
def get_chinese_word ( tokens : List[str] ):
    word_set = set()
    for token in tokens:
        chinese_word = len(token ) > 1 and is_chinese(token )
        if chinese_word:
            word_set.add(token )
    word_list = list(word_set )
    return word_list
def add_sub_symbol ( bert_tokens : List[str] , chinese_word_set : set ):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w ) for w in chinese_word_set] )
    bert_word = bert_tokens
    start , end = 0, len(bert_word )
    while start < end:
        single_word = True
        if is_chinese(bert_word[start] ):
            max_span = min(end - start , max_word_len )
            for i in range(max_span , 1 , -1 ):
                whole_word = ''.join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
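# Example (added illustration with hypothetical inputs): with
# bert_tokens = ["我", "身", "高", "180"] and chinese_word_set = {"身高"},
# add_sub_symbol returns ["我", "身", "##高", "180"], i.e. "高" is marked as a subword
# so that whole-word masking later treats "身高" as a single unit.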
def prepare_ref ( lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer ):
    ltp_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r ) for r in res]
        ltp_res.extend(res )
    assert len(ltp_res ) == len(lines )
    bert_res = []
    for i in range(0 , len(lines ) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(bert_res ) == len(lines )
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id )
            input_tokens.append(token )
        input_tokens = add_sub_symbol(input_tokens , chinese_word )
        ref_id = []
        # We only save pos of chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(input_tokens ):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token ) == 1 and _is_chinese_char(ord(clean_token ) ):
                    ref_id.append(i )
        ref_ids.append(ref_id )
    assert len(ref_ids ) == len(bert_res )
    return ref_ids
def main ( args ):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune this model, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp )  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(lines , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        data = [json.dumps(ref ) + '\n' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
UpperCAmelCase_ = parser.parse_args()
main(args)
| 490
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase_ = logging.get_logger(__name__)
class __lowercase ( __magic_name__ ):
_a = ["""pixel_values"""]
def __init__( self , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = PILImageResampling.BILINEAR , UpperCamelCase = True , UpperCamelCase = 1 / 255 , UpperCamelCase = True , UpperCamelCase = None , UpperCamelCase = None , **UpperCamelCase , ) -> None:
super().__init__(**UpperCamelCase )
__a = size if size is not None else {'shortest_edge': 384}
__a = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
__a = do_resize
__a = size
# Default value set here for backwards compatibility where the value in config is None
__a = crop_pct if crop_pct is not None else 224 / 256
__a = resample
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = PILImageResampling.BICUBIC , UpperCamelCase = None , **UpperCamelCase , ) -> np.ndarray:
__a = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}" )
__a = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__a = int(shortest_edge / crop_pct )
__a = get_resize_output_image_size(UpperCamelCase , size=UpperCamelCase , default_to_square=UpperCamelCase )
__a = resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=UpperCamelCase , size=(shortest_edge, shortest_edge) , data_format=UpperCamelCase , **UpperCamelCase )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
UpperCamelCase , size=(shortest_edge, shortest_edge) , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , **UpperCamelCase , ) -> Any:
return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , **UpperCamelCase , ) -> np.ndarray:
return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase )
def UpperCamelCase__ ( self , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = ChannelDimension.FIRST , **UpperCamelCase , ) -> PIL.Image.Image:
__a = do_resize if do_resize is not None else self.do_resize
__a = crop_pct if crop_pct is not None else self.crop_pct
__a = resample if resample is not None else self.resample
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase )
__a = make_list_of_images(UpperCamelCase )
if not valid_images(UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__a = [to_numpy_array(UpperCamelCase ) for image in images]
if do_resize:
__a = [self.resize(image=UpperCamelCase , size=UpperCamelCase , crop_pct=UpperCamelCase , resample=UpperCamelCase ) for image in images]
if do_rescale:
__a = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images]
if do_normalize:
__a = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images]
__a = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images]
__a = {'pixel_values': images}
return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
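# Added note on the resize logic above: with the default size of {"shortest_edge": 384}
# images are simply warped to 384x384, while for smaller target sizes the image is first
# resized to shortest_edge / crop_pct and then center-cropped to the requested size.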
| 490
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : Any = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class _snake_case ( UpperCamelCase__ ):
__lowerCAmelCase : int = """audio-spectrogram-transformer"""
def __init__( self , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=1_28 , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = hidden_size
lowercase__ : Tuple = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Tuple = hidden_act
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : str = initializer_range
lowercase__ : List[str] = layer_norm_eps
lowercase__ : Optional[int] = patch_size
lowercase__ : int = qkv_bias
lowercase__ : str = frequency_stride
lowercase__ : Optional[Any] = time_stride
lowercase__ : Dict = max_length
lowercase__ : Tuple = num_mel_bins
| 12
|
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=UpperCamelCase__ ):
a : str = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : List[str] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Any = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Optional[Any] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : List[Any] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> str:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Dict = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Dict = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Tuple:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Any = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : str = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Optional[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Optional[int] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Dict:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : List[str] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Dict = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : List[Any] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : List[str] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Dict = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> int:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : List[str] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> int:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : str = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Tuple = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Any:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Optional[int] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : int = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> List[str]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Union[str, Any] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
class a__ ( metaclass=UpperCamelCase__ ):
a : Optional[int] = ["""sentencepiece"""]
def __init__( self , *A , **A ) -> Optional[int]:
'''simple docstring'''
requires_backends(self , ["sentencepiece"] )
| 515
| 0
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration ( func ):
    """Decorator returning how long `func` took to run, in seconds."""
    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples ( features : dict , num_examples = 100 , seq_shapes = None ):
    """Generate `num_examples` dummy examples matching the given features."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples ):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v , _ArrayXD ):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v , datasets.Value ):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(v , datasets.Sequence ):
                while isinstance(v , datasets.Sequence ):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset ( dataset_path , features , num_examples = 100 , seq_shapes = None ):
    """Write the dummy examples to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features , num_examples=num_examples , seq_shapes=seq_shapes )
    with ArrowWriter(features=features , path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""")
    dataset = datasets.Dataset.from_file(filename=dataset_path , info=datasets.DatasetInfo(features=features ))
    return dataset
| 721
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
__UpperCamelCase : str = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class lowerCamelCase__ ( snake_case_ ):
"""simple docstring"""
__magic_name__ = """albert"""
def __init__( self , UpperCAmelCase__=3_0_0_0_0 , UpperCAmelCase__=1_2_8 , UpperCAmelCase__=4_0_9_6 , UpperCAmelCase__=1_2 , UpperCAmelCase__=1 , UpperCAmelCase__=6_4 , UpperCAmelCase__=1_6_3_8_4 , UpperCAmelCase__=1 , UpperCAmelCase__="gelu_new" , UpperCAmelCase__=0 , UpperCAmelCase__=0 , UpperCAmelCase__=5_1_2 , UpperCAmelCase__=2 , UpperCAmelCase__=0.0_2 , UpperCAmelCase__=1e-12 , UpperCAmelCase__=0.1 , UpperCAmelCase__="absolute" , UpperCAmelCase__=0 , UpperCAmelCase__=2 , UpperCAmelCase__=3 , **UpperCAmelCase__ , ) -> Tuple:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
_A : Optional[Any] = vocab_size
_A : Optional[int] = embedding_size
_A : str = hidden_size
_A : Union[str, Any] = num_hidden_layers
_A : Optional[int] = num_hidden_groups
_A : Optional[Any] = num_attention_heads
_A : Tuple = inner_group_num
_A : Tuple = hidden_act
_A : List[Any] = intermediate_size
_A : str = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : List[str] = max_position_embeddings
_A : List[Any] = type_vocab_size
_A : Any = initializer_range
_A : Tuple = layer_norm_eps
_A : Dict = classifier_dropout_prob
_A : Union[str, Any] = position_embedding_type
class lowerCamelCase__ ( snake_case_ ):
"""simple docstring"""
@property
def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_A : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
_A : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
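# Added illustration: for the default task the property above yields
# OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#              ("attention_mask", {0: "batch", 1: "sequence"}),
#              ("token_type_ids", {0: "batch", 1: "sequence"})]),
# which tells the ONNX exporter to treat the batch and sequence dimensions as dynamic.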
| 417
| 0
|
"""simple docstring"""
def palindromic_string (input_string: str ) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.
    >>> palindromic_string('abbbaba')
    'abbba'
    >>> palindromic_string('ababa')
    'ababa'
    """
    max_length = 0
    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string ) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string ) )]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string ) ):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
        while (
            j - k >= 0
            and j + k < len(new_input_string )
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
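# Added note: the "|" separators let even-length palindromes be handled with odd centers,
# and reusing length[l + r - j] inside the previously found palindrome keeps the scan
# linear in the input length, which is the key idea of Manacher's algorithm.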
| 102
|
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = ''''''
_lowercase : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowercase : str = None # compression type in fsspec. ex: "gzip"
_lowercase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : List[Any] , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[Any]):
'''simple docstring'''
super().__init__(self , **UpperCamelCase__)
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case__ = fsspec.open(
UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
snake_case__ = os.path.basename(self.file.path.split("""::""")[0])
snake_case__ = (
self.compressed_name[: self.compressed_name.rindex(""".""")]
if """.""" in self.compressed_name
else self.compressed_name
)
snake_case__ = None
@classmethod
def __magic_name__ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any]):
'''simple docstring'''
return super()._strip_protocol(UpperCamelCase__).lstrip("""/""")
def __magic_name__ ( self : Dict):
'''simple docstring'''
if self.dir_cache is None:
snake_case__ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
snake_case__ = {f["""name"""]: f}
def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str):
'''simple docstring'''
return self.file.open().read()
def __magic_name__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
snake_case__ = self._strip_protocol(UpperCamelCase__)
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
return self.file.open()
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Dict = '''bz2'''
_lowercase : Dict = '''bz2'''
_lowercase : Optional[int] = '''.bz2'''
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Dict = '''gzip'''
_lowercase : List[str] = '''gzip'''
_lowercase : Any = '''.gz'''
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : str = '''lz4'''
_lowercase : List[Any] = '''lz4'''
_lowercase : Dict = '''.lz4'''
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = '''xz'''
_lowercase : Union[str, Any] = '''xz'''
_lowercase : Optional[int] = '''.xz'''
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
_lowercase : Optional[int] = '''zstd'''
_lowercase : Tuple = '''zstd'''
_lowercase : Union[str, Any] = '''.zst'''
def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : int , ):
'''simple docstring'''
super().__init__(
fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
snake_case__ = self.file.__enter__
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase__ : str):
'''simple docstring'''
snake_case__ = file_
def __enter__( self : List[str]):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]):
'''simple docstring'''
self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__)
def __iter__( self : Any):
'''simple docstring'''
return iter(self._file)
def __magic_name__ ( self : List[str]):
'''simple docstring'''
return next(self._file)
def __getattr__( self : Any , UpperCamelCase__ : int):
'''simple docstring'''
return getattr(self._file , UpperCamelCase__)
def fixed_enter(*UpperCamelCase__ : int , **UpperCamelCase__ : int):
return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__))
snake_case__ = fixed_enter
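# Usage sketch (added illustration; it assumes these filesystem classes are registered
# with fsspec under their `protocol` names, as the datasets library does on import):
#   fs = fsspec.filesystem("gzip", fo="my_file.txt.gz")
#   [path] = fs.ls("/")
#   data = fs.cat(path)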
| 654
| 0
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 530
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
_A : Optional[Any] = ReformerTokenizer
_A : str = ReformerTokenizerFast
_A : List[str] = True
_A : Tuple = False
_A : str = True
def A_ ( self ):
super().setUp()
snake_case__ = ReformerTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self ):
snake_case__ = "<s>"
snake_case__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def A_ ( self ):
snake_case__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(lowerCamelCase ) , 10_00 )
def A_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def A_ ( self ):
if not self.test_rust_tokenizer:
return
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_rust_tokenizer()
snake_case__ = "I was born in 92000, and this is falsé."
snake_case__ = tokenizer.tokenize(lowerCamelCase )
snake_case__ = rust_tokenizer.tokenize(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
snake_case__ = tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
snake_case__ = rust_tokenizer.encode(lowerCamelCase , add_special_tokens=lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
snake_case__ = self.get_rust_tokenizer()
snake_case__ = tokenizer.encode(lowerCamelCase )
snake_case__ = rust_tokenizer.encode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def A_ ( self , lowerCamelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase , **lowerCamelCase )
# Simple input
snake_case__ = "This is a simple input"
snake_case__ = ["This is a simple input 1", "This is a simple input 2"]
snake_case__ = ("This is a simple input", "This is a pair")
snake_case__ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Simple input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Pair input
self.assertRaises(lowerCamelCase , tokenizer_r.encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
lowerCamelCase , tokenizer_r.batch_encode_plus , lowerCamelCase , max_length=lowerCamelCase , padding="max_length" , )
def A_ ( self ):
pass
def A_ ( self ):
snake_case__ = ReformerTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
snake_case__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [2_85, 46, 10, 1_70, 3_82] , )
snake_case__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case__ = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def A_ ( self ):
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def A_ ( self ):
snake_case__ = "Hello World!"
snake_case__ = [1_26, 32, 2_62, 1_52, 38, 72, 2_87]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def A_ ( self ):
snake_case__ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
snake_case__ = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
self.assertListEqual(lowerCamelCase , self.big_tokenizer.encode(lowerCamelCase ) )
@require_torch
@slow
def A_ ( self ):
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
snake_case__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case__ = " ".join(lowerCamelCase )
snake_case__ = self.big_tokenizer.encode_plus(lowerCamelCase , return_tensors="pt" )
snake_case__ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
snake_case__ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
snake_case__ = encoded_sequence["input_ids"].shape
snake_case__ = ReformerModel(lowerCamelCase )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase )
model(**lowerCamelCase )
@slow
def A_ ( self ):
# fmt: off
snake_case__ = {"input_ids": [[1_08, 2_65, 24, 1_11, 4, 2_58, 1_56, 7, 51, 2_79, 58, 7, 76, 25, 69, 2_78], [1_40, 2_43, 2_64, 1_34, 17, 2_67, 77, 2_63, 22, 2_62, 2_97, 2_58, 3_04, 1_77, 2_79, 2_66, 14, 89, 13, 35, 2_61, 2_99, 2_72, 1_37, 2_75, 2_78]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
snake_case__ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=lowerCamelCase , sequences=lowerCamelCase , )
| 530
| 1
|
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def _snake_case ( snake_case__ : BertModel , snake_case__ : str , snake_case__ : str ):
A = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
A = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(snake_case__ ):
os.makedirs(snake_case__ )
A = model.state_dict()
def to_tf_var_name(snake_case__ : str ):
for patt, repl in iter(snake_case__ ):
A = name.replace(snake_case__ , snake_case__ )
return F'bert/{name}'
def create_tf_var(snake_case__ : np.ndarray , snake_case__ : str , snake_case__ : tf.Session ):
A = tf.dtypes.as_dtype(tensor.dtype )
A = tf.get_variable(dtype=snake_case__ , shape=tensor.shape , name=snake_case__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(snake_case__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
A = to_tf_var_name(snake_case__ )
A = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
A = torch_tensor.T
A = create_tf_var(tensor=snake_case__ , name=snake_case__ , session=snake_case__ )
tf.keras.backend.set_value(snake_case__ , snake_case__ )
A = session.run(snake_case__ )
print(F'Successfully created {tf_name}: {np.allclose(snake_case__ , snake_case__ )}' )
A = tf.train.Saver(tf.trainable_variables() )
saver.save(snake_case__ , os.path.join(snake_case__ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def _snake_case ( snake_case__ : Tuple=None ):
A = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=snake_case__ , required=snake_case__ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=snake_case__ , default=snake_case__ , required=snake_case__ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=snake_case__ , required=snake_case__ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=snake_case__ , required=snake_case__ , help='Directory in which to save tensorflow model' )
A = parser.parse_args(snake_case__ )
A = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=snake_case__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
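# --- Illustrative addition, not part of the original script ---
# Hedged usage sketch: upstream this is the BERT PyTorch -> original TF checkpoint converter,
# typically run from the command line (script name and paths below are assumed placeholders):
#
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path /path/to/pytorch_model.bin \
#       --tf_cache_dir /path/to/tf_checkpoints
#
# The variable renaming it performs reduces to sequential string replacements over the
# state_dict keys. A self-contained restatement of that mapping (helper name is made up):
def _example_to_tf_var_name(name: str) -> str:
    patterns = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    for patt, repl in patterns:
        name = name.replace(patt, repl)
    return "bert/" + name
# _example_to_tf_var_name("encoder.layer.0.attention.self.query.weight")
# == "bert/encoder/layer_0/attention/self/query/kernel"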
| 91
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__A = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Union[str, Any]:
"""simple docstring"""
if got_ver is None or want_ver is None:
raise ValueError(
F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
F" reinstalling {pkg}." )
if not ops[op](version.parse(_SCREAMING_SNAKE_CASE ) , version.parse(_SCREAMING_SNAKE_CASE ) ):
raise ImportError(
F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) ->None:
"""simple docstring"""
lowerCAmelCase__ :List[str] = F"\n{hint}" if hint is not None else ''
# non-versioned check
if re.match(r'^[\w_\-\d]+$' , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Any = requirement, None, None
else:
lowerCAmelCase__ :List[str] = re.findall(r'^([^!=<>\s]+)([\s!=<>]{1,2}.+)' , _SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but'
F" got {requirement}" )
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = match[0]
lowerCAmelCase__ :List[Any] = want_full.split(',' ) # there could be multiple requirements
lowerCAmelCase__ :Any = {}
for w in want_range:
lowerCAmelCase__ :Tuple = re.findall(r'^([\s!=<>]{1,2})(.+)' , _SCREAMING_SNAKE_CASE )
if not match:
raise ValueError(
'requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,'
F" but got {requirement}" )
lowerCAmelCase__ , lowerCAmelCase__ :int = match[0]
lowerCAmelCase__ :str = want_ver
if op not in ops:
raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}" )
# special case
if pkg == "python":
lowerCAmelCase__ :Any = '.'.join([str(_SCREAMING_SNAKE_CASE ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return
# check if any version is installed
try:
lowerCAmelCase__ :List[Any] = importlib.metadata.version(_SCREAMING_SNAKE_CASE )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
F"The '{requirement}' distribution was not found and is required by this application. {hint}" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = 'Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'
return require_version(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
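# --- Illustrative addition, not part of the original module ---
# Hedged usage sketch: upstream these helpers are exposed as require_version and
# require_version_core (in this copy they appear under the placeholder name __A).
# Typical call sites look roughly like:
#
#   require_version("datasets>=1.8.0", "To fix: pip install -U datasets")
#   require_version("python>=3.8")            # handled by the special "python" branch above
#   require_version_core("tokenizers>=0.11.1,!=0.11.3")
#
# The requirement string is split with the same regex used above, e.g. (uses this module's
# `re` import):
assert re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", "datasets>=1.8.0") == [("datasets", ">=1.8.0")]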
| 93
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
_lowerCAmelCase = LDMTextToImagePipeline
_lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"negative_prompt",
"negative_prompt_embeds",
"cross_attention_kwargs",
"prompt_embeds",
}
_lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"num_images_per_prompt",
"callback",
"callback_steps",
}
_lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_lowerCAmelCase = False
def lowerCAmelCase__(self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
__a : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
__a : List[str] = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , )
torch.manual_seed(0 )
__a : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
__a : Optional[Any] = CLIPTextModel(_lowercase )
__a : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__a : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vqvae""": vae,
"""bert""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
'''simple docstring'''
if str(_lowercase ).startswith("""mps""" ):
__a : Union[str, Any] = torch.manual_seed(_lowercase )
else:
__a : Any = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__a : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__a : List[str] = self.get_dummy_components()
__a : Union[str, Any] = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__a : Dict = self.get_dummy_inputs(_lowercase )
__a : List[Any] = pipe(**_lowercase ).images
__a : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
__a : Union[str, Any] = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__(self , _lowercase , _lowercase=torch.floataa , _lowercase=0 ):
'''simple docstring'''
__a : Union[str, Any] = torch.manual_seed(_lowercase )
__a : int = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
__a : Optional[int] = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
__a : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__a : Optional[Any] = self.get_inputs(_lowercase )
__a : Any = pipe(**_lowercase ).images
__a : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
__a : Union[str, Any] = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] )
__a : int = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def lowerCAmelCase__(self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__(self , _lowercase , _lowercase=torch.floataa , _lowercase=0 ):
'''simple docstring'''
__a : List[str] = torch.manual_seed(_lowercase )
__a : str = np.random.RandomState(_lowercase ).standard_normal((1, 4, 32, 32) )
__a : List[Any] = torch.from_numpy(_lowercase ).to(device=_lowercase , dtype=_lowercase )
__a : int = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 50,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = LDMTextToImagePipeline.from_pretrained("""CompVis/ldm-text2im-large-256""" ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__a : Tuple = self.get_inputs(_lowercase )
__a : str = pipe(**_lowercase ).images[0]
__a : Any = load_numpy(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy""" )
__a : List[str] = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 711
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def __init__(self , _lowercase , _lowercase=None , _lowercase=None , _lowercase=0 ):
'''simple docstring'''
__a : Any = 1.0 if scale is None else scale
__a : str = 0.0 if loc is None else loc
super().__init__(_lowercase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=_lowercase )] )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.variance.sqrt()
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self , _lowercase , _lowercase , _lowercase , **_lowercase ):
'''simple docstring'''
super().__init__(**_lowercase )
__a : str = args_dim
__a : List[Any] = nn.ModuleList([nn.Linear(_lowercase , _lowercase ) for dim in args_dim.values()] )
__a : Dict = domain_map
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : List[Any] = [proj(_lowercase ) for proj in self.proj]
return self.domain_map(*_lowercase )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self , _lowercase ):
'''simple docstring'''
super().__init__()
__a : Optional[int] = function
def lowerCAmelCase__(self , _lowercase , *_lowercase ):
'''simple docstring'''
return self.function(_lowercase , *_lowercase )
class SCREAMING_SNAKE_CASE__ :
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
def __init__(self , _lowercase = 1 ):
'''simple docstring'''
__a : Optional[int] = dim
__a : str = {k: dim * self.args_dim[k] for k in self.args_dim}
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*_lowercase )
else:
return Independent(self.distribution_class(*_lowercase ) , 1 )
def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None , ):
'''simple docstring'''
__a : Tuple = self._base_distribution(_lowercase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return len(self.event_shape )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 0.0
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
return ParameterProjection(
in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def lowerCAmelCase__(self , *_lowercase ):
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def lowerCAmelCase__(_lowercase ):
'''simple docstring'''
return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
_lowerCAmelCase = StudentT
@classmethod
def lowerCAmelCase__(cls , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : int = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
__a : Optional[Any] = 2.0 + cls.squareplus(_lowercase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = {"loc": 1, "scale": 1}
_lowerCAmelCase = Normal
@classmethod
def lowerCAmelCase__(cls , _lowercase , _lowercase ):
'''simple docstring'''
__a : str = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = {"total_count": 1, "logits": 1}
_lowerCAmelCase = NegativeBinomial
@classmethod
def lowerCAmelCase__(cls , _lowercase , _lowercase ):
'''simple docstring'''
__a : Union[str, Any] = cls.squareplus(_lowercase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a , __a : Optional[Any] = distr_args
if self.dim == 1:
return self.distribution_class(total_count=_lowercase , logits=_lowercase )
else:
return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 )
def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None ):
'''simple docstring'''
__a , __a : List[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
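# --- Illustrative addition, not part of the original module ---
# Hedged usage sketch: upstream the classes above are DistributionOutput and its
# StudentTOutput / NormalOutput / NegativeBinomialOutput subclasses (every class in this
# copy is renamed SCREAMING_SNAKE_CASE__), so the flow below is indicative rather than
# runnable against this file:
#
#   dist_output = StudentTOutput(dim=1)
#   proj = dist_output.get_parameter_projection(in_features=32)  # Linear heads for df/loc/scale
#   distr_args = proj(torch.randn(8, 32))                        # tuple (df, loc, scale), each of shape (8,)
#   distr = dist_output.distribution(distr_args, loc=None, scale=None)
#   sample = distr.sample()                                      # shape (8,)
#   nll = -distr.log_prob(sample)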
| 63
| 0
|
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=False):
try:
SCREAMING_SNAKE_CASE = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
SCREAMING_SNAKE_CASE = default
else:
# KEY is set, convert it to True or False.
try:
SCREAMING_SNAKE_CASE = strtobool(_UpperCAmelCase)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'''If set, {key} must be yes or no.''')
return _value
a_ : Optional[Any] = parse_flag_from_env('RUN_SLOW', default=False)
a_ : int = parse_flag_from_env('RUN_REMOTE', default=False)
a_ : List[str] = parse_flag_from_env('RUN_LOCAL', default=True)
a_ : Dict = parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
a_ : Tuple = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
a_ : Dict = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
a_ : Optional[int] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
a_ : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
a_ : int = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
a_ : Dict = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
a_ : Optional[int] = pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def lowerCamelCase__ (_UpperCAmelCase):
try:
import faiss # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires faiss')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import regex # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires regex')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import elasticsearch # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires elasticsearch')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import sqlalchemy # noqa
except ImportError:
SCREAMING_SNAKE_CASE = unittest.skip('test requires sqlalchemy')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.TORCH_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires PyTorch')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.TF_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires TensorFlow')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.JAX_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires JAX')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not config.PIL_AVAILABLE:
SCREAMING_SNAKE_CASE = unittest.skip('test requires Pillow')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
def _require_spacy_model(_UpperCAmelCase):
try:
import spacy # noqa F401
spacy.load(_UpperCAmelCase)
except ImportError:
return unittest.skip('test requires spacy')(_UpperCAmelCase)
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(_UpperCAmelCase))(_UpperCAmelCase)
else:
return test_case
return _require_spacy_model
def lowerCamelCase__ (_UpperCAmelCase):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark')(_UpperCAmelCase)
else:
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_slow_tests or _run_slow_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test is slow')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_local_tests or _run_local_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test is local')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_packaged_tests or _run_packaged_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test is packaged')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (_UpperCAmelCase):
if not _run_remote_tests or _run_remote_tests == 0:
SCREAMING_SNAKE_CASE = unittest.skip('test requires remote')(_UpperCAmelCase)
return test_case
def lowerCamelCase__ (*_UpperCAmelCase):
def decorate(cls):
for name, fn in cls.__dict__.items():
if callable(_UpperCAmelCase) and name.startswith('test'):
for decorator in decorators:
SCREAMING_SNAKE_CASE = decorator(_UpperCAmelCase)
setattr(cls , _UpperCAmelCase , _UpperCAmelCase)
return cls
return decorate
class _snake_case ( A__ ):
pass
class _snake_case ( A__ ):
_lowercase : Optional[Any] = 0
_lowercase : Optional[Any] = 1
_lowercase : Optional[int] = 2
@contextmanager
def lowerCamelCase__ (_UpperCAmelCase=OfflineSimulationMode.CONNECTION_FAILS , _UpperCAmelCase=1e-16):
SCREAMING_SNAKE_CASE = requests.Session().request
def timeout_request(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
# Change the url to an invalid url so that the connection hangs
SCREAMING_SNAKE_CASE = 'https://10.255.255.1'
if kwargs.get('timeout') is None:
raise RequestWouldHangIndefinitelyError(
F'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
SCREAMING_SNAKE_CASE = timeout
try:
return online_request(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
SCREAMING_SNAKE_CASE = url
SCREAMING_SNAKE_CASE = e.args[0]
SCREAMING_SNAKE_CASE = (max_retry_error.args[0].replace('10.255.255.1' , F'''OfflineMock[{url}]'''),)
SCREAMING_SNAKE_CASE = (max_retry_error,)
raise
def raise_connection_error(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase):
raise requests.ConnectionError('Offline mode is enabled.' , request=_UpperCAmelCase)
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , _UpperCAmelCase):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , _UpperCAmelCase):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCAmelCase):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.')
@contextmanager
def lowerCamelCase__ (*_UpperCAmelCase , **_UpperCAmelCase):
SCREAMING_SNAKE_CASE = str(Path().resolve())
with tempfile.TemporaryDirectory(*_UpperCAmelCase , **_UpperCAmelCase) as tmp_dir:
try:
os.chdir(_UpperCAmelCase)
yield
finally:
os.chdir(_UpperCAmelCase)
@contextmanager
def lowerCamelCase__ ():
import gc
gc.collect()
SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def lowerCamelCase__ ():
import gc
gc.collect()
SCREAMING_SNAKE_CASE = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
return deepcopy(_UpperCAmelCase).integers(0 , 100 , 10).tolist() == deepcopy(_UpperCAmelCase).integers(0 , 100 , 10).tolist()
def lowerCamelCase__ (_UpperCAmelCase):
import decorator
from requests.exceptions import HTTPError
def _wrapper(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase):
try:
return func(*_UpperCAmelCase , **_UpperCAmelCase)
except HTTPError as err:
if str(_UpperCAmelCase).startswith('500') or str(_UpperCAmelCase).startswith('502'):
pytest.xfail(str(_UpperCAmelCase))
raise err
return decorator.decorator(_wrapper , _UpperCAmelCase)
class _snake_case :
def __init__( self , a , a , a) -> List[str]:
SCREAMING_SNAKE_CASE = returncode
SCREAMING_SNAKE_CASE = stdout
SCREAMING_SNAKE_CASE = stderr
async def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
while True:
SCREAMING_SNAKE_CASE = await stream.readline()
if line:
callback(_UpperCAmelCase)
else:
break
async def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False):
if echo:
print('\nRunning: ' , ' '.join(_UpperCAmelCase))
SCREAMING_SNAKE_CASE = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
def tee(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=""):
SCREAMING_SNAKE_CASE = line.decode('utf-8').rstrip()
sink.append(_UpperCAmelCase)
if not quiet:
print(_UpperCAmelCase , _UpperCAmelCase , file=_UpperCAmelCase)
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _UpperCAmelCase: tee(_UpperCAmelCase , _UpperCAmelCase , sys.stdout , label='stdout:')),
_read_stream(p.stderr , lambda _UpperCAmelCase: tee(_UpperCAmelCase , _UpperCAmelCase , sys.stderr , label='stderr:')),
] , timeout=_UpperCAmelCase , )
return _RunOutput(await p.wait() , _UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=180 , _UpperCAmelCase=False , _UpperCAmelCase=True):
SCREAMING_SNAKE_CASE = asyncio.get_event_loop()
SCREAMING_SNAKE_CASE = loop.run_until_complete(
_stream_subprocess(_UpperCAmelCase , env=_UpperCAmelCase , stdin=_UpperCAmelCase , timeout=_UpperCAmelCase , quiet=_UpperCAmelCase , echo=_UpperCAmelCase))
SCREAMING_SNAKE_CASE = ' '.join(_UpperCAmelCase)
if result.returncode > 0:
SCREAMING_SNAKE_CASE = '\n'.join(result.stderr)
raise RuntimeError(
F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
F'''The combined stderr from workers follows:\n{stderr}''')
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F'''\'{cmd_str}\' produced no output.''')
return result
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0')
SCREAMING_SNAKE_CASE = re.sub(R'^gw' , '' , _UpperCAmelCase , 0 , re.M)
return int(_UpperCAmelCase)
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = 2_9500
SCREAMING_SNAKE_CASE = pytest_xdist_worker_id()
return port + uniq_delta
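# --- Illustrative addition, not part of the original module ---
# Hedged usage sketch: upstream these are the datasets test utilities (require_faiss,
# offline(), execute_subprocess_async(), ...); in this copy most of them collapse to the
# placeholder name lowerCamelCase__, so the snippet is indicative only:
#
#   @require_faiss
#   def test_add_faiss_index(self): ...
#
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#       ...  # HTTP calls made here time out instead of reaching the network
#
#   result = execute_subprocess_async(["python", "-c", "print('ok')"], env=os.environ.copy())
#   assert result.returncode == 0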
| 73
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , **lowerCamelCase , ) -> Dict:
'''simple docstring'''
UpperCamelCase : Optional[Any] = path_or_paths
UpperCamelCase : List[str] = split if split or isinstance(lowerCamelCase , lowerCamelCase ) else "train"
UpperCamelCase : Any = features
UpperCamelCase : Optional[int] = cache_dir
UpperCamelCase : str = keep_in_memory
UpperCamelCase : str = streaming
UpperCamelCase : List[Any] = num_proc
UpperCamelCase : Optional[Any] = kwargs
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
'''simple docstring'''
pass
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = False , lowerCamelCase = False , lowerCamelCase = None , **lowerCamelCase , ) -> str:
'''simple docstring'''
UpperCamelCase : Tuple = features
UpperCamelCase : str = cache_dir
UpperCamelCase : List[Any] = keep_in_memory
UpperCamelCase : int = streaming
UpperCamelCase : int = num_proc
UpperCamelCase : Tuple = kwargs
@abstractmethod
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[Dataset, IterableDataset]:
'''simple docstring'''
pass
| 173
| 0
|
'''simple docstring'''
def __UpperCAmelCase ( UpperCamelCase__ :Any ) -> Dict:
snake_case__ : List[Any] = [False] * len(UpperCamelCase__ )
snake_case__ : Dict = [-1] * len(UpperCamelCase__ )
def dfs(UpperCamelCase__ :str , UpperCamelCase__ :Union[str, Any] ):
snake_case__ : Any = True
snake_case__ : Tuple = c
for u in graph[v]:
if not visited[u]:
dfs(UpperCamelCase__ , 1 - c )
for i in range(len(UpperCamelCase__ ) ):
if not visited[i]:
dfs(UpperCamelCase__ , 0 )
for i in range(len(UpperCamelCase__ ) ):
for j in graph[i]:
if color[i] == color[j]:
return False
return True
# Adjacency list of graph
lowercase_ : Optional[Any] ={0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 717
|
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class _SCREAMING_SNAKE_CASE (lowercase__ ):
A__ = 'M-CLIP'
def __init__( self : List[Any] , __UpperCamelCase : int=1024 , __UpperCamelCase : Union[str, Any]=768 , **__UpperCamelCase : int ) -> Any:
"""simple docstring"""
snake_case__ : int = transformerDimSize
snake_case__ : Tuple = imageDimSize
super().__init__(**__UpperCamelCase )
class _SCREAMING_SNAKE_CASE (lowercase__ ):
A__ = MCLIPConfig
def __init__( self : Optional[int] , __UpperCamelCase : Union[str, Any] , *__UpperCamelCase : Dict , **__UpperCamelCase : Any ) -> str:
"""simple docstring"""
super().__init__(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
snake_case__ : str = XLMRobertaModel(__UpperCamelCase )
snake_case__ : Optional[Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] ) -> Tuple:
"""simple docstring"""
snake_case__ : int = self.transformer(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
snake_case__ : List[str] = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(__UpperCamelCase ), embs
| 574
| 0
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowercase : str = logging.getLogger(__name__)
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a=-1 ):
'''simple docstring'''
__a : Tuple = label_idx
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
if isinstance(__a , __a ):
__a : Any = mode.value
__a : List[Any] = os.path.join(__a , f"""{mode}.txt""" )
__a : Optional[Any] = 1
__a : str = []
with open(__a , encoding='utf-8' ) as f:
__a : Tuple = []
__a : Dict = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
guid_index += 1
__a : str = []
__a : int = []
else:
__a : Optional[int] = line.split(' ' )
words.append(splits[0] )
if len(__a ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
return examples
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(__a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__a : Tuple = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(__a )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
__a : Any = f.read().splitlines()
if "O" not in labels:
__a : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
__a : Any = f.read().splitlines()
if "O" not in labels:
__a : List[Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
if isinstance(__a , __a ):
__a : Dict = mode.value
__a : List[str] = os.path.join(__a , f"""{mode}.txt""" )
__a : Tuple = 1
__a : List[str] = []
with open(__a , encoding='utf-8' ) as f:
for sentence in parse_incr(__a ):
__a : Any = []
__a : Optional[int] = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(__a ) == len(__a )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Tuple = 0
for sentence in parse_incr(__a ):
__a : int = preds_list[example_id]
__a : str = ''
for token in sentence:
out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(__a )
example_id += 1
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 476
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowercase : str = logging.getLogger(__name__)
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , __a=-1 ):
'''simple docstring'''
__a : Tuple = label_idx
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
if isinstance(__a , __a ):
__a : Any = mode.value
__a : List[Any] = os.path.join(__a , f"""{mode}.txt""" )
__a : Optional[Any] = 1
__a : str = []
with open(__a , encoding='utf-8' ) as f:
__a : Tuple = []
__a : Dict = []
for line in f:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
guid_index += 1
__a : str = []
__a : int = []
else:
__a : Optional[int] = line.split(' ' )
words.append(splits[0] )
if len(__a ) > 1:
labels.append(splits[self.label_idx].replace('\n' , '' ) )
else:
# Examples could have no label for mode = "test"
labels.append('O' )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
return examples
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[str] = 0
for line in test_input_reader:
if line.startswith('-DOCSTART-' ) or line == "" or line == "\n":
writer.write(__a )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
__a : Tuple = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
writer.write(__a )
else:
logger.warning('Maximum sequence length exceeded: No prediction for \'%s\'.' , line.split()[0] )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
__a : Any = f.read().splitlines()
if "O" not in labels:
__a : Optional[int] = ['O'] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self ):
'''simple docstring'''
super().__init__(label_idx=-2 )
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
__a : Any = f.read().splitlines()
if "O" not in labels:
__a : List[Any] = ['O'] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self , __a , __a ):
'''simple docstring'''
if isinstance(__a , __a ):
__a : Dict = mode.value
__a : List[str] = os.path.join(__a , f"""{mode}.txt""" )
__a : Tuple = 1
__a : List[str] = []
with open(__a , encoding='utf-8' ) as f:
for sentence in parse_incr(__a ):
__a : Any = []
__a : Optional[int] = []
for token in sentence:
words.append(token['form'] )
labels.append(token['upos'] )
assert len(__a ) == len(__a )
if words:
examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=__a , labels=__a ) )
guid_index += 1
return examples
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Tuple = 0
for sentence in parse_incr(__a ):
__a : int = preds_list[example_id]
__a : str = ''
for token in sentence:
out += f"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(__a )
example_id += 1
def __UpperCAmelCase ( self , __a ):
'''simple docstring'''
if path:
with open(__a , 'r' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 476
| 1
|
'''simple docstring'''
from math import pi, sqrt
def lowerCamelCase__ ( __lowerCamelCase : float ):
'''simple docstring'''
if num <= 0:
raise ValueError('math domain error' )
if num > 1_71.5:
raise OverflowError('math range error' )
elif num - int(__lowerCamelCase ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
return sqrt(__lowerCamelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
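# Worked example of the recurrence above (illustration only, not part of the original file):
# gamma(3.5) = 2.5 * gamma(2.5) = 2.5 * 1.5 * gamma(1.5) = 2.5 * 1.5 * 0.5 * gamma(0.5)
#            = 1.875 * sqrt(pi) ≈ 3.3234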
def lowerCamelCase__ ( ):
'''simple docstring'''
assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
lowercase =1.0
while num:
lowercase =float(input('Gamma of: '))
print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 331
|
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowerCamelCase__ ( ):
'''simple docstring'''
_UpperCAmelCase : List[str] =HfArgumentParser(__lowerCamelCase )
_UpperCAmelCase : Optional[int] =parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase : Optional[Any] =TensorFlowBenchmark(args=__lowerCamelCase )
try:
_UpperCAmelCase : List[str] =parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_UpperCAmelCase : Union[str, Any] ='Arg --no_{0} is no longer used, please use --no-{0} instead.'
_UpperCAmelCase : Dict =' '.join(str(__lowerCamelCase ).split(' ' )[:-1] )
_UpperCAmelCase : Tuple =''
_UpperCAmelCase : Optional[int] =eval(str(__lowerCamelCase ).split(' ' )[-1] )
_UpperCAmelCase : str =[]
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__lowerCamelCase )
if len(__lowerCamelCase ) > 0:
_UpperCAmelCase : int =full_error_msg + begin_error_msg + str(__lowerCamelCase )
raise ValueError(__lowerCamelCase )
benchmark.run()
if __name__ == "__main__":
main()
| 331
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class lowerCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = '''canine'''
def __init__( self , a_=768 , a_=12 , a_=12 , a_=3072 , a_="gelu" , a_=0.1 , a_=0.1 , a_=1_6384 , a_=16 , a_=0.02 , a_=1E-12 , a_=0 , a_=0Xe000 , a_=0Xe001 , a_=4 , a_=4 , a_=8 , a_=1_6384 , a_=128 , **a_ , ):
super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ )
lowerCamelCase_ : List[Any] = max_position_embeddings
lowerCamelCase_ : Dict = hidden_size
lowerCamelCase_ : int = num_hidden_layers
lowerCamelCase_ : Optional[Any] = num_attention_heads
lowerCamelCase_ : str = intermediate_size
lowerCamelCase_ : List[Any] = hidden_act
lowerCamelCase_ : Any = hidden_dropout_prob
lowerCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase_ : Optional[int] = initializer_range
lowerCamelCase_ : str = type_vocab_size
lowerCamelCase_ : Any = layer_norm_eps
# Character config:
lowerCamelCase_ : Optional[int] = downsampling_rate
lowerCamelCase_ : str = upsampling_kernel_size
lowerCamelCase_ : Union[str, Any] = num_hash_functions
lowerCamelCase_ : List[Any] = num_hash_buckets
lowerCamelCase_ : Optional[int] = local_transformer_stride
| 250
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 250
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 712
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _snake_case ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = HfArgumentParser(lowercase__ )
lowerCAmelCase_ :List[Any] = parser.parse_args_into_dataclasses()[0]
lowerCAmelCase_ :Any = TensorFlowBenchmark(args=lowercase__ )
try:
lowerCAmelCase_ :str = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
lowerCAmelCase_ :List[Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
lowerCAmelCase_ :Dict = """ """.join(str(lowercase__ ).split(""" """ )[:-1] )
lowerCAmelCase_ :List[Any] = """"""
lowerCAmelCase_ :Dict = eval(str(lowercase__ ).split(""" """ )[-1] )
lowerCAmelCase_ :List[str] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase__ )
if len(lowercase__ ) > 0:
lowerCAmelCase_ :List[str] = full_error_msg + begin_error_msg + str(lowercase__ )
raise ValueError(lowercase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 256
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def lowerCamelCase ( _snake_case : Dict ):
'''simple docstring'''
lowercase__ = torch.load(__lowerCAmelCase ,map_location="cpu" )
if "model" in sd.keys():
lowercase__ = torch.load(__lowerCAmelCase ,map_location="cpu" )["model"]
# pop unnecessary weights
lowercase__ = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(__lowerCAmelCase )
lowercase__ = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
lowercase__ = sd.pop(__lowerCAmelCase )
lowercase__ = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
lowercase__ = sd[key]
# We split QKV in separate Q,K,V
lowercase__ = key.replace(".qkv_proj." ,".q_proj." )
lowercase__ = key.replace(".qkv_proj." ,".k_proj." )
lowercase__ = key.replace(".qkv_proj." ,".v_proj." )
lowercase__ = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has its QKV weight separated in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
lowercase__ , lowercase__ , lowercase__ = torch.split(__lowerCAmelCase ,depth // 3 ,dim=0 )
lowercase__ = q
lowercase__ = k
lowercase__ = v
del sd[key]
return sd
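# --- Illustrative addition, not part of the original script ---
# The ".qkv_proj." branch above slices a fused projection of shape (3 * hidden, ...) into
# three equal blocks along dim 0, in the K,V,Q order the comment mentions. A hypothetical
# standalone restatement (helper name is made up; torch is imported at the top of the file):
def _example_split_qkv(fused):
    depth = fused.shape[0]
    assert depth % 3 == 0
    k, v, q = torch.split(fused, depth // 3, dim=0)
    return q, k, v
# e.g. _example_split_qkv(torch.randn(12, 4)) returns three (4, 4) tensors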
@torch.no_grad()
def lowerCamelCase ( _snake_case : Optional[Any] ,_snake_case : Tuple ,_snake_case : int=None ):
'''simple docstring'''
lowercase__ = load_checkpoint(__lowerCAmelCase )
if config is not None:
lowercase__ = OPTConfig.from_pretrained(__lowerCAmelCase )
else:
lowercase__ = OPTConfig()
lowercase__ = OPTModel(__lowerCAmelCase ).half().eval()
model.load_state_dict(__lowerCAmelCase )
# Check results
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
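# Hypothetical invocation of the script above (script name, paths and config id are
# placeholders, not values taken from this file):
#
#   python convert_opt_original_pytorch_checkpoint_to_pytorch.py \
#       --fairseq_path /path/to/metaseq/restored.pt \
#       --pytorch_dump_folder_path ./opt-converted \
#       --hf_config facebook/opt-350m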
| 267
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
snake_case : str = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
snake_case : str = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
snake_case : Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def __lowercase ( __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] ):
return float((preds == labels).mean() )
def __lowercase ( __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ):
a__ = simple_accuracy(__lowerCAmelCase , __lowerCAmelCase )
a__ = float(fa_score(y_true=__lowerCAmelCase , y_pred=__lowerCAmelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def __lowercase ( __lowerCAmelCase : Any , __lowerCAmelCase : str ):
a__ = float(pearsonr(__lowerCAmelCase , __lowerCAmelCase )[0] )
a__ = float(spearmanr(__lowerCAmelCase , __lowerCAmelCase )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ (datasets.Metric ):
def lowerCamelCase__( self :str ) -> Any:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format='numpy' ,)
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 335
| 0
|
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Convert each lowercase letter to its 1-based position in the alphabet."""
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    """Convert a list of 1-based alphabet positions back into a string."""
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    """Read a line from stdin, encode it, then decode it back for verification."""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
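# Usage sketch (illustrative, not part of the original script): the cipher maps
# each lowercase letter to its 1-based position in the alphabet, so the round
# trip is lossless for strings made of a-z characters, e.g.
#     encode("hello")          -> [8, 5, 12, 12, 15]
#     decode(encode("hello"))  -> "hello"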
| 709
|
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None
    @staticmethod
    def is_available():
        raise NotImplementedError
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError
    def default_hp_space(self, trial):
        raise NotImplementedError
    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}." )
    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    name = '''optuna'''
    @staticmethod
    def is_available():
        return is_optuna_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    name = '''ray'''
    pip_package = '''\'ray[tune]\''''
    @staticmethod
    def is_available():
        return is_ray_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    name = '''sigopt'''
    @staticmethod
    def is_available():
        return is_sigopt_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    name = '''wandb'''
    @staticmethod
    def is_available():
        return is_wandb_available()
    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)
    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    """Return the name of the first installed hyperparameter search backend."""
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default." )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
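# Usage sketch (illustrative, not part of the original module): these backends are
# normally driven through Trainer.hyperparameter_search(); assuming a configured
# `trainer` and optuna installed, the default backend resolved above is used when
# no explicit `backend=` argument is passed, e.g.
#
#     def hp_space(trial):
#         return {"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True)}
#
#     best_run = trainer.hyperparameter_search(
#         hp_space=hp_space,
#         n_trials=10,
#         direction="minimize",
#         backend=default_hp_search_backend(),  # e.g. "optuna" when it is installed
#     )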
| 556
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str) -> str:
    """Infer the pipeline data format from the file extension, defaulting to stdin/stdout pipes."""
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def run_command_factory(args):
    """Instantiate the pipeline and the data reader from the parsed CLI arguments."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
    data_format = try_infer_format_from_ext(args.input) if args.format == '''infer''' else args.format
    reader = PipelineDataFormat.from_str(
        format=data_format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite, )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser('''run''', help='''Run a pipeline through the CLI''')
        run_parser.add_argument('''--task''', choices=get_supported_tasks(), help='''Task to run''')
        run_parser.add_argument('''--input''', type=str, help='''Path to the file to use for inference''')
        run_parser.add_argument('''--output''', type=str, help='''Path to the file that will be used post to write results.''')
        run_parser.add_argument('''--model''', type=str, help='''Name or path to the model to instantiate.''')
        run_parser.add_argument('''--config''', type=str, help='''Name or path to the model\'s config to instantiate.''')
        run_parser.add_argument(
            '''--tokenizer''', type=str, help='''Name of the tokenizer to use. (default: same as the model name)''')
        run_parser.add_argument(
            '''--column''', type=str, help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''', )
        run_parser.add_argument(
            '''--format''', type=str, default='''infer''', choices=PipelineDataFormat.SUPPORTED_FORMATS, help='''Input format to read from''', )
        run_parser.add_argument(
            '''--device''', type=int, default=-1, help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''', )
        run_parser.add_argument('''--overwrite''', action='''store_true''', help='''Allow overwriting the output file.''')
        run_parser.set_defaults(func=run_command_factory)
    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}" )
        else:
            self._reader.save(outputs)
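# Example invocation (a sketch; the file names are hypothetical). Once registered
# under the `transformers-cli` entry point, the command reads rows from the input
# file, feeds them through the pipeline and writes the predictions back out:
#
#     transformers-cli run \
#         --task text-classification \
#         --input reviews.csv \
#         --column text \
#         --format csv \
#         --output predictions.csv \
#         --overwrite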
| 460
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : str=13 , UpperCamelCase : Optional[int]=7 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=False , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Union[str, Any]=33 , UpperCamelCase : List[Any]=32 , UpperCamelCase : Dict=5 , UpperCamelCase : str=4 , UpperCamelCase : str=37 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Dict=512 , UpperCamelCase : Any=16 , UpperCamelCase : List[Any]=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : Tuple=3 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : int=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def UpperCamelCase__ (self : str ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase__ (self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : Dict ):
'''simple docstring'''
lowercase__ = EsmModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase )
lowercase__ = model(UpperCamelCase )
lowercase__ = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ (self : str , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = EsmForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ (self : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = EsmForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
lowercase__ = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) ,(
lowercase__
) ,(
lowercase__
) ,(
lowercase__
) ,(
lowercase__
) ,(
lowercase__
) ,
) = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Dict = False
lowerCAmelCase__ : Optional[Any] = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Optional[int] = ()
lowerCAmelCase__ : Any = (
{
"""feature-extraction""": EsmModel,
"""fill-mask""": EsmForMaskedLM,
"""text-classification""": EsmForSequenceClassification,
"""token-classification""": EsmForTokenClassification,
"""zero-shot""": EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Any = True
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = EsmModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase__ = type
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase )
@slow
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = EsmModel.from_pretrained(UpperCamelCase )
self.assertIsNotNone(UpperCamelCase )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()[0]
lowercase__ = EsmEmbeddings(config=UpperCamelCase )
lowercase__ = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowercase__ = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowercase__ = create_position_ids_from_input_ids(UpperCamelCase , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase , UpperCamelCase ) ) )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()[0]
lowercase__ = EsmEmbeddings(config=UpperCamelCase )
lowercase__ = torch.empty(2 , 4 , 30 )
lowercase__ = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
lowercase__ = torch.as_tensor([expected_single_positions, expected_single_positions] )
lowercase__ = embeddings.create_position_ids_from_inputs_embeds(UpperCamelCase )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(UpperCamelCase , UpperCamelCase ) ) )
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
pass
@require_torch
class __lowerCAmelCase (lowercase_ ):
'''simple docstring'''
@slow
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
lowercase__ = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
lowercase__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase__ = model(UpperCamelCase )[0]
lowercase__ = 33
lowercase__ = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , UpperCamelCase )
lowercase__ = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=1E-4 ) )
@slow
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
lowercase__ = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' )
model.eval()
lowercase__ = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowercase__ = model(UpperCamelCase )[0]
# compare the actual values for a slice.
lowercase__ = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=1E-4 ) )
| 460
| 1
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Try to find a path from the top-left to the bottom-right corner of the maze."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive helper: mark cell (i, j) as part of the path and explore its four neighbours."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark as visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
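# Illustrative example (not part of the original module): 0 marks a free cell and
# 1 a blocked one; solve_maze() prints the visited-cell matrix when a path from
# the top-left to the bottom-right corner exists, e.g.
#
#     solve_maze([[0, 1], [0, 0]])
#     # prints:
#     # [1, 0]
#     # [1, 1]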
| 700
|
def multiply(a, b):
    """Multiply two non-negative integers using only addition, doubling and halving."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def multiply_mod(a, b, c):
    """Compute (a * b) % c with the same doubling/halving scheme, reducing the accumulator modulo c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
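if __name__ == "__main__":
    # Illustrative self-check (not part of the original module): the doubling/halving
    # scheme adds `a` once for every set bit of `b`, reproducing ordinary and modular
    # multiplication.
    assert multiply(5, 6) == 30
    assert multiply_mod(5, 6, 7) == 30 % 7
    print(multiply(5, 6), multiply_mod(5, 6, 7))  # 30 2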
| 392
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit_mae'''] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit_mae'''] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
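# Behaviour sketch (illustrative): with the lazy module in place, importing the
# package stays cheap because nothing from the modeling files is loaded until an
# attribute is actually accessed, e.g.
#
#     from transformers.models.vit_mae import ViTMAEConfig  # config only, no torch import triggered
#     from transformers.models.vit_mae import ViTMAEModel   # resolved lazily; requires torch at this point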
| 138
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from the Indeed results page for `location`."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 138
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__lowerCAmelCase = logging.get_logger(__name__)
class _lowerCAmelCase ( __snake_case ):
__lowerCAmelCase : Optional[Any] = ['''pixel_values''']
def __init__( self : List[str] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : Union[int, float] = 1 / 255 , a : bool = True , a : Dict[str, int] = None , a : bool = True , **a : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**a )
lowercase = size if size is not None else {'''shortest_edge''': 224}
lowercase = get_size_dict(a , default_to_square=a )
lowercase = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256}
lowercase = get_size_dict(a , param_name='''crop_size''' )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_flip_channel_order
def _lowerCAmelCase ( self : Optional[int] , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PIL.Image.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : str , ) -> np.ndarray:
"""simple docstring"""
lowercase = get_size_dict(a , default_to_square=a )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
lowercase = get_resize_output_image_size(a , size=size['''shortest_edge'''] , default_to_square=a )
return resize(a , size=a , resample=a , data_format=a , **a )
def _lowerCAmelCase ( self : Tuple , a : np.ndarray , a : Dict[str, int] , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , ) -> np.ndarray:
"""simple docstring"""
lowercase = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(a , size=(size['''height'''], size['''width''']) , data_format=a , **a )
def _lowerCAmelCase ( self : int , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : List[str] , ) -> Optional[int]:
"""simple docstring"""
return rescale(a , scale=a , data_format=a , **a )
def _lowerCAmelCase ( self : List[Any] , a : np.ndarray , a : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray:
"""simple docstring"""
return flip_channel_order(a , data_format=a )
def _lowerCAmelCase ( self : int , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : PILImageResampling = None , a : bool = None , a : float = None , a : bool = None , a : Dict[str, int] = None , a : bool = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : Any , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = resample if resample is not None else self.resample
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowercase = size if size is not None else self.size
lowercase = get_size_dict(a , default_to_square=a )
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(a , param_name='''crop_size''' )
lowercase = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(a ) for image in images]
if do_resize:
lowercase = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=a , size=a ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=a , scale=a ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowercase = [self.flip_channel_order(image=a ) for image in images]
lowercase = [to_channel_dimension_format(a , a ) for image in images]
lowercase = {'''pixel_values''': images}
return BatchFeature(data=a , tensor_type=a )
def _lowerCAmelCase ( self : List[Any] , a : List[Any] , a : List[Tuple] = None ) -> List[str]:
"""simple docstring"""
lowercase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a ) != len(a ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(a ):
lowercase = target_sizes.numpy()
lowercase = []
for idx in range(len(a ) ):
lowercase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=a )
lowercase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a )
else:
lowercase = logits.argmax(dim=1 )
lowercase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
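# Usage sketch (illustrative; assumes this class corresponds to transformers'
# MobileViTImageProcessor, that a checkpoint such as "apple/deeplabv3-mobilevit-small"
# is available, and that PIL and torch are installed):
#
#     from transformers import MobileViTImageProcessor, MobileViTForSemanticSegmentation
#     from PIL import Image
#
#     processor = MobileViTImageProcessor(size={"shortest_edge": 224})
#     model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small")
#     inputs = processor(images=Image.open("street.jpg"), return_tensors="pt")   # "street.jpg" is hypothetical
#     outputs = model(**inputs)
#     seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[(512, 512)])[0]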
| 396
|
"""simple docstring"""
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def A_ ( __UpperCamelCase : Features ):
lowercase = np.inf
def set_batch_size(__UpperCamelCase : FeatureType ) -> None:
nonlocal batch_size
if isinstance(__UpperCamelCase , __UpperCamelCase ):
lowercase = min(__UpperCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
lowercase = min(__UpperCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS )
elif isinstance(__UpperCamelCase , __UpperCamelCase ) and feature.dtype == "binary":
lowercase = min(__UpperCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS )
_visit(__UpperCamelCase , __UpperCamelCase )
return None if batch_size is np.inf else batch_size
class _lowerCAmelCase ( __snake_case ):
def __init__( self : str , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[int] = None , **a : Dict , ) -> Any:
"""simple docstring"""
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowercase = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
lowercase = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
lowercase = Parquet(
cache_dir=a , data_files=a , features=a , hash=a , **a , )
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
# Build iterable dataset
if self.streaming:
lowercase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase = None
lowercase = None
lowercase = None
lowercase = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowercase = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__( self : Optional[Any] , a : Dataset , a : Union[PathLike, BinaryIO] , a : Optional[int] = None , **a : int , ) -> Optional[Any]:
"""simple docstring"""
lowercase = dataset
lowercase = path_or_buf
lowercase = batch_size or get_writer_batch_size(dataset.features )
lowercase = parquet_writer_kwargs
def _lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
lowercase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
lowercase = self._write(file_obj=a , batch_size=a , **self.parquet_writer_kwargs )
else:
lowercase = self._write(file_obj=self.path_or_buf , batch_size=a , **self.parquet_writer_kwargs )
return written
def _lowerCAmelCase ( self : Optional[int] , a : BinaryIO , a : int , **a : List[Any] ) -> int:
"""simple docstring"""
lowercase = 0
lowercase = parquet_writer_kwargs.pop('''path_or_buf''' , a )
lowercase = self.dataset.features.arrow_schema
lowercase = pq.ParquetWriter(a , schema=a , **a )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , a ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
lowercase = query_table(
table=self.dataset._data , key=slice(a , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(a )
written += batch.nbytes
writer.close()
return written
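# Usage sketch (illustrative, not part of the original module): these classes are
# normally reached through the Dataset convenience methods rather than instantiated
# directly, e.g.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     ds.to_parquet("toy.parquet")                      # internally uses the parquet writer above
#     reloaded = Dataset.from_parquet("toy.parquet")    # internally uses the parquet reader above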
| 396
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ : Dict = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
lowercase__ : List[str] = {
"bert-base-uncased": 512,
"bert-large-uncased": 512,
"bert-base-cased": 512,
"bert-large-cased": 512,
"bert-base-multilingual-uncased": 512,
"bert-base-multilingual-cased": 512,
"bert-base-chinese": 512,
"bert-base-german-cased": 512,
"bert-large-uncased-whole-word-masking": 512,
"bert-large-cased-whole-word-masking": 512,
"bert-large-uncased-whole-word-masking-finetuned-squad": 512,
"bert-large-cased-whole-word-masking-finetuned-squad": 512,
"bert-base-cased-finetuned-mrpc": 512,
"bert-base-german-dbmdz-cased": 512,
"bert-base-german-dbmdz-uncased": 512,
"TurkuNLP/bert-base-finnish-cased-v1": 512,
"TurkuNLP/bert-base-finnish-uncased-v1": 512,
"wietsedv/bert-base-dutch-cased": 512,
}
lowercase__ : Union[str, Any] = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = BertTokenizer
def __init__( self : Any , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Optional[Any]=True , UpperCAmelCase__ : Any="[UNK]" , UpperCAmelCase__ : List[Any]="[SEP]" , UpperCAmelCase__ : List[str]="[PAD]" , UpperCAmelCase__ : List[str]="[CLS]" , UpperCAmelCase__ : Tuple="[MASK]" , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[str]=None , **UpperCAmelCase__ : List[str] , ) ->Optional[Any]:
super().__init__(
UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , do_lower_case=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , tokenize_chinese_chars=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ , **UpperCAmelCase__ , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCAmelCase__ ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(UpperCAmelCase__ , normalizer_state.pop('''type''' ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**UpperCAmelCase__ )
UpperCAmelCase_ = do_lower_case
def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int]=None ) ->Optional[int]:
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) ->List[int]:
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) ->Tuple[str]:
UpperCAmelCase_ = self._tokenizer.model.save(UpperCAmelCase__ , name=UpperCAmelCase__ )
return tuple(UpperCAmelCase__ )
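# Behaviour sketch (illustrative): for a sentence pair the two helpers above build
# [CLS] A [SEP] B [SEP] and assign token_type_ids 0 to the first segment (including
# its [SEP]) and 1 to the second, e.g.
#
#     from transformers import BertTokenizerFast
#
#     tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     enc = tok("hello world", "how are you")
#     # enc["input_ids"]       -> [101, ..., 102, ..., 102]
#     # enc["token_type_ids"]  -> [0, 0, ..., 0, 1, ..., 1]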
| 390
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a__ = """\
"""
a__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
a__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase_ ( self : int) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string"""),
}) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def UpperCamelCase_ ( self : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : int = 16 , lowerCAmelCase : bool = True , lowerCAmelCase : List[str]=None) -> List[Any]:
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
_snake_case : int = """cuda"""
else:
_snake_case : Union[str, Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
_snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(lowerCAmelCase)
_snake_case : List[Any] = model.to(lowerCAmelCase)
_snake_case : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_snake_case : List[str] = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
assert (
len(lowerCAmelCase) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]})
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_snake_case : int = model.config.max_length - 1
else:
_snake_case : Dict = model.config.max_length
_snake_case : List[Any] = tokenizer(
lowerCAmelCase , add_special_tokens=lowerCAmelCase , padding=lowerCAmelCase , truncation=lowerCAmelCase , max_length=lowerCAmelCase , return_tensors="""pt""" , return_attention_mask=lowerCAmelCase , ).to(lowerCAmelCase)
_snake_case : Dict = encodings["""input_ids"""]
_snake_case : int = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1) , 1)), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1) , 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_snake_case : Dict = []
_snake_case : List[str] = CrossEntropyLoss(reduction="""none""")
for start_index in logging.tqdm(range(0 , len(lowerCAmelCase) , lowerCAmelCase)):
_snake_case : Union[str, Any] = min(start_index + batch_size , len(lowerCAmelCase))
_snake_case : Tuple = encoded_texts[start_index:end_index]
_snake_case : Optional[Any] = attn_masks[start_index:end_index]
if add_start_token:
_snake_case : str = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(lowerCAmelCase)
_snake_case : Optional[int] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1)
_snake_case : Union[str, Any] = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa).to(lowerCAmelCase), attn_mask] , dim=1)
_snake_case : Dict = encoded_batch
with torch.no_grad():
_snake_case : Optional[Any] = model(lowerCAmelCase , attention_mask=lowerCAmelCase).logits
_snake_case : Tuple = out_logits[..., :-1, :].contiguous()
_snake_case : int = labels[..., 1:].contiguous()
_snake_case : List[str] = attn_mask[..., 1:].contiguous()
_snake_case : Optional[Any] = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2) , lowerCAmelCase) * shift_attention_mask_batch).sum(1)
/ shift_attention_mask_batch.sum(1))
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCAmelCase)}
| 477
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    """Pairwise squared Euclidean distances between the rows of `a` and the rows of `b`."""
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d
def color_quantize(x, clusters):
    """Map every RGB pixel in `x` to the index of its nearest colour cluster."""
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class __snake_case ( lowerCamelCase_ ):
lowerCAmelCase_ = ["pixel_values"]
def __init__( self : List[str] , _lowercase : Optional[Union[List[List[int]], np.ndarray]] = None , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : bool = True , **_lowercase : List[Any] , ):
"""simple docstring"""
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ = size if size is not None else {"""height""": 2_56, """width""": 2_56}
SCREAMING_SNAKE_CASE__ = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ = np.array(_lowercase ) if clusters is not None else None
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = resample
SCREAMING_SNAKE_CASE__ = do_normalize
SCREAMING_SNAKE_CASE__ = do_color_quantize
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dictionary must contain both height and width keys. Got {size.keys()}""" )
return resize(
_lowercase , size=(size["""height"""], size["""width"""]) , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Optional[Union[str, ChannelDimension]] = None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = rescale(image=_lowercase , scale=1 / 1_27.5 , data_format=_lowercase )
SCREAMING_SNAKE_CASE__ = image - 1
return image
def __a ( self : List[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[List[List[int]], np.ndarray]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **_lowercase : List[str] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
SCREAMING_SNAKE_CASE__ = clusters if clusters is not None else self.clusters
SCREAMING_SNAKE_CASE__ = np.array(_lowercase )
SCREAMING_SNAKE_CASE__ = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_color_quantize and clusters is None:
raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ = [self.normalize(image=_lowercase ) for image in images]
if do_color_quantize:
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(_lowercase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
SCREAMING_SNAKE_CASE__ = np.array(_lowercase )
SCREAMING_SNAKE_CASE__ = color_quantize(_lowercase , _lowercase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
SCREAMING_SNAKE_CASE__ = images.shape[0]
SCREAMING_SNAKE_CASE__ = images.reshape(_lowercase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
SCREAMING_SNAKE_CASE__ = list(_lowercase )
else:
SCREAMING_SNAKE_CASE__ = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ = {"""input_ids""": images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
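if __name__ == "__main__":
    # Toy demonstration (not part of the original module): map a dark and a light pixel
    # onto a two-colour palette; color_quantize() assigns each pixel the index of its
    # nearest cluster, which is how images become "input_ids" for ImageGPT.
    palette = np.array([[0, 0, 0], [255, 255, 255]], dtype=float)
    pixels = np.array([[[10, 10, 10], [250, 240, 245]]], dtype=float)  # a 1x2 RGB "image"
    print(color_quantize(pixels, palette))  # -> [0 1]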
| 379
|
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid, squashing any real value into (0, 1)."""
    return 1 / (1 + np.exp(-z))
def cost_function(h, y):
    """Binary cross-entropy between predicted probabilities h and labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def log_likelihood(x, y, weights):
    """Log-likelihood of the labels under a logistic model with the given weights."""
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))
def logistic_reg(alpha, x, y, max_iterations=70_000):
    """Fit logistic regression by batch gradient descent and return the weight vector."""
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
    def predict_prob(x):
        """Probability that each sample in x belongs to class 1."""
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
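    # A minimal usage sketch (not part of the original script): once `theta` has been fitted
    # above, `predict_prob` can score a new, made-up measurement. The values below are
    # illustrative only and are not taken from the iris dataset.
    new_sample = np.array([[5.0, 3.4]])  # hypothetical sepal length / sepal width
    print("P(class 1):", predict_prob(new_sample))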
| 379
| 1
|
__lowerCamelCase : int = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__lowerCamelCase : Dict = [{"type": "code", "content": INSTALL_CONTENT}]
__lowerCamelCase : Optional[int] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 323
|
import torch
from torch import nn
class __magic_name__ ( nn.Module ):
def __init__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : Any=1 , UpperCamelCase__ : int=False ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
UpperCAmelCase = n_token
UpperCAmelCase = d_embed
UpperCAmelCase = d_proj
UpperCAmelCase = cutoffs + [n_token]
UpperCAmelCase = [0] + self.cutoffs
UpperCAmelCase = div_val
UpperCAmelCase = self.cutoffs[0]
UpperCAmelCase = len(self.cutoffs ) - 1
UpperCAmelCase = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
UpperCAmelCase = nn.Parameter(torch.zeros(self.n_clusters ) )
UpperCAmelCase = nn.ModuleList()
UpperCAmelCase = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase__ , UpperCamelCase__ ) ) )
else:
self.out_projs.append(UpperCamelCase__ )
self.out_layers.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
else:
for i in range(len(self.cutoffs ) ):
UpperCAmelCase , UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCamelCase__ , UpperCamelCase__ ) ) )
self.out_layers.append(nn.Linear(UpperCamelCase__ , r_idx - l_idx ) )
UpperCAmelCase = keep_order
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
if proj is None:
UpperCAmelCase = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
UpperCAmelCase = nn.functional.linear(UpperCamelCase__ , proj.t().contiguous() )
UpperCAmelCase = nn.functional.linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=False ) -> Optional[int]:
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
UpperCAmelCase = hidden[..., :-1, :].contiguous()
UpperCAmelCase = labels[..., 1:].contiguous()
UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
UpperCAmelCase = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
UpperCAmelCase = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
UpperCAmelCase = labels != -1_00
UpperCAmelCase = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase = (
-nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
UpperCAmelCase , UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase , UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase = self.out_layers[i].weight
UpperCAmelCase = self.out_layers[i].bias
if i == 0:
UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
if labels is None:
UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCAmelCase = torch.zeros_like(UpperCamelCase__ , dtype=hidden.dtype , device=hidden.device )
UpperCAmelCase = 0
UpperCAmelCase = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
UpperCAmelCase , UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCAmelCase = (labels >= l_idx) & (labels < r_idx)
UpperCAmelCase = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCAmelCase = labels.index_select(0 , UpperCamelCase__ ) - l_idx
UpperCAmelCase = head_logprob.index_select(0 , UpperCamelCase__ )
UpperCAmelCase = hidden.index_select(0 , UpperCamelCase__ )
else:
UpperCAmelCase = hidden
if i == 0:
if labels is not None:
UpperCAmelCase = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
UpperCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCAmelCase = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
UpperCAmelCase = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCAmelCase = logprob_i
if labels is not None:
if (hasattr(self , "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCamelCase__ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCamelCase__ : Optional[Any] ) -> int:
'''simple docstring'''
if self.n_clusters == 0:
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
else:
# construct weights and biases
UpperCAmelCase , UpperCAmelCase = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCAmelCase , UpperCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase = self.out_layers[0].weight[l_idx:r_idx]
UpperCAmelCase = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCAmelCase = self.out_layers[i].weight
UpperCAmelCase = self.out_layers[i].bias
if i == 0:
UpperCAmelCase = torch.cat([weight_i, self.cluster_weight] , dim=0 )
UpperCAmelCase = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCamelCase__ )
biases.append(UpperCamelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[0], biases[0], self.out_projs[0]
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
UpperCAmelCase = [0] + self.cutoffs
for i in range(len(UpperCamelCase__ ) - 1 ):
UpperCAmelCase , UpperCAmelCase = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCAmelCase = head_logprob[:, : self.cutoffs[0]]
else:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = weights[i], biases[i], self.out_projs[i]
UpperCAmelCase = self._compute_logit(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = nn.functional.log_softmax(UpperCamelCase__ , dim=1 )
UpperCAmelCase = head_logprob[:, -i] + tail_logprob_i
UpperCAmelCase = logprob_i
return out
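# Illustration of how the cutoffs partition the vocabulary (a sketch, not part of the
# module above): ids below the first cutoff form the frequent "head" scored at full
# dimensionality, and each remaining slice becomes a "tail" cluster whose embedding is
# shrunk by div_val before its own softmax is computed. The sizes here are made up.
n_token, cutoffs = 10_000, [1_000, 4_000]
cutoff_ends = [0] + cutoffs + [n_token]
for i in range(len(cutoff_ends) - 1):
    l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
    print(f"cluster {i}: token ids [{l_idx}, {r_idx}) -> {r_idx - l_idx} tokens")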
| 323
| 1
|
import torch
from diffusers import DiffusionPipeline
class a_(DiffusionPipeline):
    """A minimal custom pipeline that runs one denoising step and returns a tensor of ones."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
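# A usage sketch (illustrative only, not part of the original test): any UNet2DModel /
# scheduler pair can be registered. The tiny UNet configuration below is made up so the
# example stays fast; it is not a real checkpoint, and the output is a tensor of ones by
# construction of the pipeline above.
from diffusers import DDPMScheduler, UNet2DModel

tiny_unet = UNet2DModel(
    sample_size=8,
    in_channels=3,
    out_channels=3,
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "UpBlock2D"),
)
pipe = a_(tiny_unet, DDPMScheduler())
print(pipe().shape)  # torch.Size([1, 3, 8, 8])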
| 719
|
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num):
    """Compute the gamma function of ``num`` by numerical integration."""
    if num <= 0:
        raise ValueError('math domain error')
    return quad(integrand, 0, inf, args=(num))[0]
def integrand(x, z):
    """Integrand x**(z - 1) * e**(-x) of the gamma function."""
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
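    # A quick numerical check (illustrative): for positive integers n the integral above
    # reproduces the factorial relation gamma(n) == (n - 1)!, so gamma(5) should be ~24.
    print(gamma(5))  # ~24.0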
| 252
| 0
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
SCREAMING_SNAKE_CASE_ = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
SCREAMING_SNAKE_CASE_ = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float'''),
'''references''': datasets.Value('''float'''),
}) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=False) -> Any:
if return_pvalue:
UpperCamelCase = pearsonr(lowerCamelCase_ , lowerCamelCase_)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowerCamelCase_ , lowerCamelCase_)[0])}
| 34
|
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
lowercase_ : Any = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
lowercase_ : str = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a batch of torch image tensors in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
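# A small usage sketch (illustrative): pt_to_pil expects (batch, channels, height, width)
# tensors in [-1, 1]; the random batch below simply stands in for a diffusion model's output.
import torch
fake_batch = torch.rand(2, 3, 32, 32) * 2 - 1
pil_images = pt_to_pil(fake_batch)
print(len(pil_images), pil_images[0].size)  # 2 (32, 32)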
| 588
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
_UpperCAmelCase = dict(zip(snake_case , range(len(snake_case ) ) ) )
_UpperCAmelCase = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
_UpperCAmelCase = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 16000,
'return_attention_mask': False,
'do_normalize': True,
}
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_UpperCAmelCase = os.path.join(self.tmpdirname , snake_case )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case ) + '\n' )
# load decoder from hub
_UpperCAmelCase = 'hf-internal-testing/ngram-beam-search-decoder'
def lowerCamelCase_ ( self , **snake_case ) -> Tuple:
_UpperCAmelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(snake_case )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> int:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> int:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(snake_case , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=snake_case , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = floats_list((3, 1000) )
_UpperCAmelCase = feature_extractor(snake_case , return_tensors='np' )
_UpperCAmelCase = processor(snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = 'This is a test string'
_UpperCAmelCase = processor(text=snake_case )
_UpperCAmelCase = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self , snake_case=(2, 10, 16) , snake_case=77 ) -> Optional[Any]:
np.random.seed(snake_case )
return np.random.rand(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_UpperCAmelCase = processor.decode(snake_case )
_UpperCAmelCase = decoder.decode_beams(snake_case )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_UpperCAmelCase = processor.batch_decode(snake_case )
else:
with get_context(snake_case ).Pool() as pool:
_UpperCAmelCase = processor.batch_decode(snake_case , snake_case )
_UpperCAmelCase = list(snake_case )
with get_context('fork' ).Pool() as p:
_UpperCAmelCase = decoder.decode_beams_batch(snake_case , snake_case )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(snake_case , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(snake_case , decoded_processor.logit_score )
self.assertListEqual(snake_case , decoded_processor.lm_score )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = 15
_UpperCAmelCase = -20.0
_UpperCAmelCase = -4.0
_UpperCAmelCase = processor.batch_decode(
snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , )
_UpperCAmelCase = decoded_processor_out.text
_UpperCAmelCase = list(snake_case )
with get_context('fork' ).Pool() as pool:
_UpperCAmelCase = decoder.decode_beams_batch(
snake_case , snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , )
_UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
_UpperCAmelCase = [d[0][2] for d in decoded_decoder_out]
_UpperCAmelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , snake_case )
self.assertTrue(np.array_equal(snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , snake_case , atol=1E-3 ) )
self.assertTrue(np.array_equal(snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , snake_case , atol=1E-3 ) )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = 2.0
_UpperCAmelCase = 5.0
_UpperCAmelCase = -20.0
_UpperCAmelCase = True
_UpperCAmelCase = processor.batch_decode(
snake_case , alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , )
_UpperCAmelCase = decoded_processor_out.text
_UpperCAmelCase = list(snake_case )
decoder.reset_params(
alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , )
with get_context('fork' ).Pool() as pool:
_UpperCAmelCase = decoder.decode_beams_batch(
snake_case , snake_case , )
_UpperCAmelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , snake_case )
_UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , snake_case )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
_UpperCAmelCase = os.listdir(snake_case )
_UpperCAmelCase = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = snapshot_download('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(snake_case )
_UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
_UpperCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
_UpperCAmelCase = os.listdir(snake_case )
_UpperCAmelCase = os.listdir(snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = floats_list((3, 1000) )
_UpperCAmelCase = processor_wavaveca(snake_case , return_tensors='np' )
_UpperCAmelCase = processor_auto(snake_case , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = processor_wavaveca.batch_decode(snake_case )
_UpperCAmelCase = processor_auto.batch_decode(snake_case )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = self.get_feature_extractor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_decoder()
_UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def lowerCamelCase_ ( snake_case , snake_case ) -> Dict:
_UpperCAmelCase = [d[key] for d in offsets]
return retrieved_list
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = self._get_dummy_logits()[0]
_UpperCAmelCase = processor.decode(snake_case , output_word_offsets=snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(snake_case , snake_case ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
_UpperCAmelCase = self._get_dummy_logits()
_UpperCAmelCase = processor.batch_decode(snake_case , output_word_offsets=snake_case )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(snake_case , snake_case ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(snake_case , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowerCamelCase_ ( self ) -> List[str]:
import torch
_UpperCAmelCase = load_dataset('common_voice' , 'en' , split='train' , streaming=snake_case )
_UpperCAmelCase = ds.cast_column('audio' , datasets.Audio(sampling_rate=16000 ) )
_UpperCAmelCase = iter(snake_case )
_UpperCAmelCase = next(snake_case )
_UpperCAmelCase = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
_UpperCAmelCase = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_UpperCAmelCase = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
_UpperCAmelCase = model(snake_case ).logits.cpu().numpy()
_UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=snake_case )
_UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_UpperCAmelCase = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
_UpperCAmelCase = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(snake_case , 'word' ) ) , snake_case )
self.assertEqual(' '.join(self.get_from_offsets(snake_case , 'word' ) ) , output.text )
# output times
_UpperCAmelCase = torch.tensor(self.get_from_offsets(snake_case , 'start_time' ) )
_UpperCAmelCase = torch.tensor(self.get_from_offsets(snake_case , 'end_time' ) )
# fmt: off
_UpperCAmelCase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_UpperCAmelCase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(snake_case , snake_case , atol=0.01 ) )
self.assertTrue(torch.allclose(snake_case , snake_case , atol=0.01 ) )
| 706
|
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str:
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any:
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=snake_case )
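# Sketch of how the two condition streams are routed (the lengths mirror the defaults
# registered above; interpreting them as the text sequence of length 77 followed by the
# image-embedding sequence of length 257 is an assumption): slice i of
# encoder_hidden_states goes to transformers[transformer_index_for_condition[i]], and the
# two outputs are then blended with mix_ratio.
condition_lengths = [77, 257]
transformer_index_for_condition = [1, 0]
tokens_start = 0
for i, length in enumerate(condition_lengths):
    print(f"condition {i}: tokens [{tokens_start}:{tokens_start + length}] -> transformer {transformer_index_for_condition[i]}")
    tokens_start += length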
| 24
| 0
|
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _lowercase , _lowercase = True , _lowercase = None , _lowercase = 32 , _lowercase = True , _lowercase = 1 / 255 , _lowercase = True , _lowercase = True , _lowercase = [0.4814_5466, 0.457_8275, 0.4082_1073] , _lowercase = [0.2686_2954, 0.2613_0258, 0.2757_7711] , _lowercase = True , _lowercase=7 , _lowercase=30 , _lowercase=400 , _lowercase=3 , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = do_resize
_lowerCAmelCase = size if size is not None else {"""shortest_edge""": 288}
_lowerCAmelCase = size_divisor
_lowerCAmelCase = do_rescale
_lowerCAmelCase = rescale_factor
_lowerCAmelCase = do_normalize
_lowerCAmelCase = do_center_crop
_lowerCAmelCase = image_mean
_lowerCAmelCase = image_std
_lowerCAmelCase = do_pad
_lowerCAmelCase = batch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = min_resolution
_lowerCAmelCase = max_resolution
def _lowercase ( self ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def _lowercase ( self , _lowercase , _lowercase=False ):
"""simple docstring"""
if not batched:
_lowerCAmelCase = self.size["""shortest_edge"""]
_lowerCAmelCase = image_inputs[0]
if isinstance(_lowercase , Image.Image ):
_lowerCAmelCase , _lowerCAmelCase = image.size
else:
_lowerCAmelCase , _lowerCAmelCase = image.shape[1], image.shape[2]
_lowerCAmelCase = size / min(_lowercase , _lowercase )
if h < w:
_lowerCAmelCase , _lowerCAmelCase = size, scale * w
else:
_lowerCAmelCase , _lowerCAmelCase = scale * h, size
_lowerCAmelCase = int((1_333 / 800) * size )
if max(_lowercase , _lowercase ) > max_size:
_lowerCAmelCase = max_size / max(_lowercase , _lowercase )
_lowerCAmelCase = newh * scale
_lowerCAmelCase = neww * scale
_lowerCAmelCase , _lowerCAmelCase = int(newh + 0.5 ), int(neww + 0.5 )
_lowerCAmelCase , _lowerCAmelCase = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowercase : Optional[int] = BridgeTowerImageProcessor if is_vision_available() else None
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = BridgeTowerImageProcessingTester(self )
@property
def _lowercase ( self ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , """image_mean""" ) )
self.assertTrue(hasattr(_lowercase , """image_std""" ) )
self.assertTrue(hasattr(_lowercase , """do_normalize""" ) )
self.assertTrue(hasattr(_lowercase , """do_resize""" ) )
self.assertTrue(hasattr(_lowercase , """size""" ) )
self.assertTrue(hasattr(_lowercase , """size_divisor""" ) )
def _lowercase ( self ):
"""simple docstring"""
pass
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self ):
"""simple docstring"""
_lowerCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
_lowerCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCAmelCase = image_processing(_lowercase , return_tensors="""pt""" ).pixel_values
_lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 5
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
UpperCAmelCase_ : Any = logging.get_logger(__name__)
UpperCAmelCase_ : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ : str = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase_ : Union[str, Any] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class lowerCAmelCase ( __lowerCAmelCase):
__lowercase : Dict = VOCAB_FILES_NAMES
__lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : Union[str, Any] = LEDTokenizer
__lowercase : int = ['''input_ids''', '''attention_mask''']
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="replace" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="</s>" , __SCREAMING_SNAKE_CASE="<s>" , __SCREAMING_SNAKE_CASE="<unk>" , __SCREAMING_SNAKE_CASE="<pad>" , __SCREAMING_SNAKE_CASE="<mask>" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=True , **__SCREAMING_SNAKE_CASE , ) -> List[Any]:
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type''' ) )
__snake_case = add_prefix_space
__snake_case = pre_tok_class(**__SCREAMING_SNAKE_CASE )
__snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__snake_case = '''post_processor'''
__snake_case = getattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
__snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__snake_case = tuple(state['''sep'''] )
if "cls" in state:
__snake_case = tuple(state['''cls'''] )
__snake_case = False
if state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE ) != add_prefix_space:
__snake_case = add_prefix_space
__snake_case = True
if state.get('''trim_offsets''' , __SCREAMING_SNAKE_CASE ) != trim_offsets:
__snake_case = trim_offsets
__snake_case = True
if changes_to_apply:
__snake_case = getattr(__SCREAMING_SNAKE_CASE , state.pop('''type''' ) )
__snake_case = component_class(**__SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
__snake_case = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else value
__snake_case = value
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> BatchEncoding:
'''simple docstring'''
__snake_case = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
__snake_case = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE )
return tuple(__SCREAMING_SNAKE_CASE )
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> List[Any]:
'''simple docstring'''
__snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> dict:
'''simple docstring'''
__snake_case = super()._pad(
encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
__snake_case = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__snake_case = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
__snake_case = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__snake_case = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
__snake_case = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
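# Illustration of the global_attention_mask padding convention implemented above (made-up
# values): 1 marks tokens that receive global attention, 0 marks local-attention tokens,
# and the padding appended here uses -1 so it cannot be confused with "local attention".
global_attention_mask = [1, 0, 0, 0]  # global attention on the first token only
padded_to_eight = global_attention_mask + [-1] * (8 - len(global_attention_mask))
print(padded_to_eight)  # [1, 0, 0, 0, -1, -1, -1, -1]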
| 24
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums
    return []
def solution() -> int:
    """Return the smallest odd composite that cannot be written as prime + 2*i*i."""
    return compute_nums(1)[0]
if __name__ == "__main__":
print(f'''{solution() = }''')
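    # Quick check (illustrative): only two odd composites below 100001 violate the conjecture,
    # so asking for the first two should yield both known counterexamples.
    print(compute_nums(2))  # expected: [5777, 5993]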
| 712
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {"""configuration_wavlm""": ["""WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WavLMConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 135
| 0
|
'''
Pure-Python implementation of the SHA-1 hash algorithm (for study only; use
hashlib for anything security- or performance-sensitive).
'''
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    """Computes the SHA-1 digest of a byte string."""

    def __init__(self, data: bytes):
        self.data = data
        # Initial hash state defined by the SHA-1 specification
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        """Pad the message to a multiple of 64 bytes, appending its bit length."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self) -> list:
        """Split the padded message into 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block: bytes) -> list:
        """Expand a 64-byte block into the 80-word message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    """The pure-Python digest must agree with hashlib on a sample message."""
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    """Hash a string or a file given on the command line."""
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
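

# A minimal usage sketch, assuming the cleaned-up SHA1Hash class above: hash an
# arbitrary byte string and cross-check the digest against hashlib.
if __name__ == "__main__":
    example = b"hello world"
    digest = SHA1Hash(example).final_hash()
    assert digest == hashlib.sha1(example).hexdigest()  # noqa: S324
    print(f"SHA-1 of {example!r}: {digest}")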
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ : Optional[int] = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : int = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
__magic_name__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__UpperCamelCase : Dict = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__UpperCamelCase : str = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__UpperCamelCase : Tuple = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
__UpperCamelCase : List[Any] = {
"num_train_timesteps": 40,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
__UpperCamelCase : Dict = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
__UpperCamelCase : Optional[Any] = {
"num_train_timesteps": 151,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
def strabool(v):
    """Parse a command-line string into a boolean (argparse type helper)."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def _a ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = checkpoint[F"{old_prefix}.in_layers.0.weight"]
UpperCamelCase__ : int = checkpoint[F"{old_prefix}.in_layers.0.bias"]
UpperCamelCase__ : int = checkpoint[F"{old_prefix}.in_layers.2.weight"]
UpperCamelCase__ : Tuple = checkpoint[F"{old_prefix}.in_layers.2.bias"]
UpperCamelCase__ : List[Any] = checkpoint[F"{old_prefix}.emb_layers.1.weight"]
UpperCamelCase__ : Any = checkpoint[F"{old_prefix}.emb_layers.1.bias"]
UpperCamelCase__ : str = checkpoint[F"{old_prefix}.out_layers.0.weight"]
UpperCamelCase__ : Any = checkpoint[F"{old_prefix}.out_layers.0.bias"]
UpperCamelCase__ : Dict = checkpoint[F"{old_prefix}.out_layers.3.weight"]
UpperCamelCase__ : Optional[Any] = checkpoint[F"{old_prefix}.out_layers.3.bias"]
if has_skip:
UpperCamelCase__ : Dict = checkpoint[F"{old_prefix}.skip_connection.weight"]
UpperCamelCase__ : str = checkpoint[F"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def _a ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=None ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : List[str] = checkpoint[F"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = checkpoint[F"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
UpperCamelCase__ : List[str] = checkpoint[F"{old_prefix}.norm.weight"]
UpperCamelCase__ : List[str] = checkpoint[F"{old_prefix}.norm.bias"]
UpperCamelCase__ : Any = weight_q.squeeze(-1 ).squeeze(-1 )
UpperCamelCase__ : Optional[Any] = bias_q.squeeze(-1 ).squeeze(-1 )
UpperCamelCase__ : Dict = weight_k.squeeze(-1 ).squeeze(-1 )
UpperCamelCase__ : Tuple = bias_k.squeeze(-1 ).squeeze(-1 )
UpperCamelCase__ : Any = weight_v.squeeze(-1 ).squeeze(-1 )
UpperCamelCase__ : Any = bias_v.squeeze(-1 ).squeeze(-1 )
UpperCamelCase__ : Optional[int] = (
checkpoint[F"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
UpperCamelCase__ : List[str] = checkpoint[F"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ : Any = torch.load(snake_case__ , map_location='''cpu''' )
UpperCamelCase__ : List[str] = {}
UpperCamelCase__ : str = checkpoint['''time_embed.0.weight''']
UpperCamelCase__ : Union[str, Any] = checkpoint['''time_embed.0.bias''']
UpperCamelCase__ : Union[str, Any] = checkpoint['''time_embed.2.weight''']
UpperCamelCase__ : Dict = checkpoint['''time_embed.2.bias''']
if unet_config["num_class_embeds"] is not None:
UpperCamelCase__ : int = checkpoint['''label_emb.weight''']
UpperCamelCase__ : Dict = checkpoint['''input_blocks.0.0.weight''']
UpperCamelCase__ : str = checkpoint['''input_blocks.0.0.bias''']
UpperCamelCase__ : List[Any] = unet_config['''down_block_types''']
UpperCamelCase__ : int = unet_config['''layers_per_block''']
UpperCamelCase__ : Union[str, Any] = unet_config['''attention_head_dim''']
UpperCamelCase__ : Optional[int] = unet_config['''block_out_channels''']
UpperCamelCase__ : int = 1
UpperCamelCase__ : str = channels_list[0]
for i, layer_type in enumerate(snake_case__ ):
UpperCamelCase__ : Dict = channels_list[i]
UpperCamelCase__ : Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(snake_case__ ):
UpperCamelCase__ : Tuple = F"down_blocks.{i}.resnets.{j}"
UpperCamelCase__ : int = F"input_blocks.{current_layer}.0"
UpperCamelCase__ : List[str] = True if j == 0 and downsample_block_has_skip else False
UpperCamelCase__ : int = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(snake_case__ ):
UpperCamelCase__ : Optional[Any] = F"down_blocks.{i}.resnets.{j}"
UpperCamelCase__ : Tuple = F"input_blocks.{current_layer}.0"
UpperCamelCase__ : str = True if j == 0 and downsample_block_has_skip else False
UpperCamelCase__ : Tuple = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__ )
UpperCamelCase__ : Optional[Any] = F"down_blocks.{i}.attentions.{j}"
UpperCamelCase__ : Tuple = F"input_blocks.{current_layer}.1"
UpperCamelCase__ : List[str] = convert_attention(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
current_layer += 1
if i != len(snake_case__ ) - 1:
UpperCamelCase__ : Optional[Any] = F"down_blocks.{i}.downsamplers.0"
UpperCamelCase__ : List[str] = F"input_blocks.{current_layer}.0"
UpperCamelCase__ : Optional[Any] = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
current_layer += 1
UpperCamelCase__ : str = current_channels
# hardcoded the mid-block for now
UpperCamelCase__ : List[str] = '''mid_block.resnets.0'''
UpperCamelCase__ : Tuple = '''middle_block.0'''
UpperCamelCase__ : List[Any] = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
UpperCamelCase__ : Optional[int] = '''mid_block.attentions.0'''
UpperCamelCase__ : str = '''middle_block.1'''
UpperCamelCase__ : str = convert_attention(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
UpperCamelCase__ : Dict = '''mid_block.resnets.1'''
UpperCamelCase__ : Dict = '''middle_block.2'''
UpperCamelCase__ : Optional[Any] = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
UpperCamelCase__ : List[str] = 0
UpperCamelCase__ : Optional[int] = unet_config['''up_block_types''']
for i, layer_type in enumerate(snake_case__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
UpperCamelCase__ : int = F"up_blocks.{i}.resnets.{j}"
UpperCamelCase__ : Tuple = F"output_blocks.{current_layer}.0"
UpperCamelCase__ : Any = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__ )
current_layer += 1
if i != len(snake_case__ ) - 1:
UpperCamelCase__ : int = F"up_blocks.{i}.upsamplers.0"
UpperCamelCase__ : Any = F"output_blocks.{current_layer-1}.1"
UpperCamelCase__ : Tuple = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
UpperCamelCase__ : Tuple = F"up_blocks.{i}.resnets.{j}"
UpperCamelCase__ : Dict = F"output_blocks.{current_layer}.0"
UpperCamelCase__ : Optional[Any] = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ , has_skip=snake_case__ )
UpperCamelCase__ : Dict = F"up_blocks.{i}.attentions.{j}"
UpperCamelCase__ : Optional[Any] = F"output_blocks.{current_layer}.1"
UpperCamelCase__ : List[Any] = convert_attention(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
current_layer += 1
if i != len(snake_case__ ) - 1:
UpperCamelCase__ : Optional[int] = F"up_blocks.{i}.upsamplers.0"
UpperCamelCase__ : List[str] = F"output_blocks.{current_layer-1}.2"
UpperCamelCase__ : Optional[int] = convert_resnet(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
UpperCamelCase__ : str = checkpoint['''out.0.weight''']
UpperCamelCase__ : List[Any] = checkpoint['''out.0.bias''']
UpperCamelCase__ : List[Any] = checkpoint['''out.2.weight''']
UpperCamelCase__ : Optional[Any] = checkpoint['''out.2.bias''']
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase : str = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
__UpperCamelCase : int = parser.parse_args()
__UpperCamelCase : Optional[Any] = strabool(args.class_cond)
__UpperCamelCase : Optional[Any] = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
__UpperCamelCase : List[Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCamelCase : List[Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__UpperCamelCase : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
__UpperCamelCase : Tuple = None
__UpperCamelCase : List[str] = con_pt_to_diffuser(args.unet_path, unet_config)
__UpperCamelCase : Union[str, Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__UpperCamelCase : List[Any] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__UpperCamelCase : Optional[Any] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__UpperCamelCase : str = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
__UpperCamelCase : Tuple = CMStochasticIterativeScheduler(**scheduler_config)
__UpperCamelCase : Optional[int] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
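

# A minimal sanity-check sketch, assuming the standard diffusers
# ConsistencyModelPipeline API already imported above: reload the freshly saved
# pipeline to confirm the converted weights and scheduler round-trip through
# save_pretrained / from_pretrained.
if __name__ == "__main__":
    reloaded_pipeline = ConsistencyModelPipeline.from_pretrained(args.dump_path)
    print(
        f"Reloaded {reloaded_pipeline.unet.__class__.__name__} with "
        f"{reloaded_pipeline.scheduler.__class__.__name__} from {args.dump_path}"
    )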
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : Any = {
"andreasmadsen/efficient_mlm_m0.40": (
"https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
),
}
class __magic_name__ ( __lowerCAmelCase):
A: List[Any] = "roberta-prelayernorm"
def __init__( self : Tuple , lowerCamelCase__ : List[Any]=50265 , lowerCamelCase__ : Optional[Any]=768 , lowerCamelCase__ : str=12 , lowerCamelCase__ : Union[str, Any]=12 , lowerCamelCase__ : Dict=3072 , lowerCamelCase__ : int="gelu" , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : List[str]=512 , lowerCamelCase__ : int=2 , lowerCamelCase__ : Tuple=0.02 , lowerCamelCase__ : List[Any]=1E-1_2 , lowerCamelCase__ : str=1 , lowerCamelCase__ : int=0 , lowerCamelCase__ : int=2 , lowerCamelCase__ : Union[str, Any]="absolute" , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Dict=None , **lowerCamelCase__ : Any , ) -> Optional[int]:
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = vocab_size
UpperCamelCase__ : Union[str, Any] = hidden_size
UpperCamelCase__ : List[str] = num_hidden_layers
UpperCamelCase__ : Optional[int] = num_attention_heads
UpperCamelCase__ : List[str] = hidden_act
UpperCamelCase__ : Optional[int] = intermediate_size
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : List[str] = attention_probs_dropout_prob
UpperCamelCase__ : Optional[int] = max_position_embeddings
UpperCamelCase__ : Optional[Any] = type_vocab_size
UpperCamelCase__ : Union[str, Any] = initializer_range
UpperCamelCase__ : Dict = layer_norm_eps
UpperCamelCase__ : Union[str, Any] = position_embedding_type
UpperCamelCase__ : Optional[int] = use_cache
UpperCamelCase__ : int = classifier_dropout
class __magic_name__ ( __lowerCAmelCase):
@property
def UpperCAmelCase__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ : Dict = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
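

# A minimal usage sketch, assuming the public transformers RobertaPreLayerNorm*
# classes; the tiny hyperparameters below are arbitrary illustrations, not values
# taken from this file.
if __name__ == "__main__":
    from transformers import RobertaPreLayerNormConfig, RobertaPreLayerNormModel

    tiny_config = RobertaPreLayerNormConfig(
        hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256
    )
    tiny_model = RobertaPreLayerNormModel(tiny_config)
    print(f"{sum(p.numel() for p in tiny_model.parameters()):,} parameters")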
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class a__ ( A__ ):
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
UpperCamelCase_ : List[str] =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'num_attention_heads' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'num_encoder_blocks' ) )
class a__ :
def __init__( self :Optional[int] , _lowerCamelCase :List[Any] , _lowerCamelCase :Optional[int]=13 , _lowerCamelCase :List[str]=64 , _lowerCamelCase :int=3 , _lowerCamelCase :int=4 , _lowerCamelCase :Tuple=[2, 2, 2, 2] , _lowerCamelCase :Optional[Any]=[8, 4, 2, 1] , _lowerCamelCase :Union[str, Any]=[16, 32, 64, 128] , _lowerCamelCase :Dict=[1, 4, 8, 16] , _lowerCamelCase :Optional[Any]=[1, 2, 4, 8] , _lowerCamelCase :Tuple=True , _lowerCamelCase :Any=True , _lowerCamelCase :Union[str, Any]="gelu" , _lowerCamelCase :Optional[Any]=0.1 , _lowerCamelCase :List[str]=0.1 , _lowerCamelCase :Dict=0.02 , _lowerCamelCase :int=3 , _lowerCamelCase :Optional[Any]=None , ):
'''simple docstring'''
UpperCamelCase_ : int =parent
UpperCamelCase_ : int =batch_size
UpperCamelCase_ : int =image_size
UpperCamelCase_ : Optional[int] =num_channels
UpperCamelCase_ : Optional[int] =num_encoder_blocks
UpperCamelCase_ : Optional[int] =sr_ratios
UpperCamelCase_ : str =depths
UpperCamelCase_ : Tuple =hidden_sizes
UpperCamelCase_ : List[Any] =downsampling_rates
UpperCamelCase_ : Optional[int] =num_attention_heads
UpperCamelCase_ : str =is_training
UpperCamelCase_ : str =use_labels
UpperCamelCase_ : int =hidden_act
UpperCamelCase_ : str =hidden_dropout_prob
UpperCamelCase_ : Optional[int] =attention_probs_dropout_prob
UpperCamelCase_ : List[Any] =initializer_range
UpperCamelCase_ : Tuple =num_labels
UpperCamelCase_ : List[Any] =scope
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : List[str] =None
if self.use_labels:
UpperCamelCase_ : Tuple =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ : str =self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self :int , _lowerCamelCase :Dict , _lowerCamelCase :str , _lowerCamelCase :Union[str, Any] ):
'''simple docstring'''
UpperCamelCase_ : Any =SegformerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_ : List[Any] =model(_lowerCamelCase )
UpperCamelCase_ : List[Any] =self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowerCamelCase_ ( self :List[str] , _lowerCamelCase :int , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :List[str] ):
'''simple docstring'''
UpperCamelCase_ : Any =self.num_labels
UpperCamelCase_ : int =SegformerForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_ : str =model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
UpperCamelCase_ : Optional[Any] =model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Optional[int] , _lowerCamelCase :List[Any] ):
'''simple docstring'''
UpperCamelCase_ : Tuple =1
UpperCamelCase_ : Dict =SegformerForSemanticSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_ : str =torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_lowerCamelCase )
UpperCamelCase_ : str =model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def lowerCamelCase_ ( self :List[str] ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] =self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : Optional[int] =config_and_inputs
UpperCamelCase_ : Optional[int] ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase__ = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
UpperCamelCase_ : Dict =SegformerModelTester(self )
UpperCamelCase_ : str =SegformerConfigTester(self , config_class=_lowerCamelCase )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
UpperCamelCase_ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_lowerCamelCase )
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_lowerCamelCase )
@unittest.skip('SegFormer does not use inputs_embeds' )
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Any =model_class(_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Any =[*signature.parameters.keys()]
UpperCamelCase_ : Optional[int] =['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ : Tuple =True
for model_class in self.all_model_classes:
UpperCamelCase_ : int =True
UpperCamelCase_ : Union[str, Any] =False
UpperCamelCase_ : Tuple =True
UpperCamelCase_ : str =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : List[str] =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase_ : Optional[Any] =outputs.attentions
UpperCamelCase_ : Dict =sum(self.model_tester.depths )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase_ : str =True
UpperCamelCase_ : Tuple =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : List[Any] =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase_ : Tuple =outputs.attentions
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# verify the first attentions (first block, first layer)
UpperCamelCase_ : int =(self.model_tester.image_size // 4) ** 2
UpperCamelCase_ : str =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
UpperCamelCase_ : Optional[Any] =(self.model_tester.image_size // 32) ** 2
UpperCamelCase_ : Any =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
UpperCamelCase_ : str =len(_lowerCamelCase )
# Check attention is always last and order is fine
UpperCamelCase_ : str =True
UpperCamelCase_ : List[str] =True
UpperCamelCase_ : int =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : str =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(_lowerCamelCase ) )
UpperCamelCase_ : Optional[Any] =outputs.attentions
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# verify the first attentions (first block, first layer)
UpperCamelCase_ : List[Any] =(self.model_tester.image_size // 4) ** 2
UpperCamelCase_ : Dict =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
def check_hidden_states_output(_lowerCamelCase :Optional[Any] , _lowerCamelCase :List[Any] , _lowerCamelCase :List[str] ):
UpperCamelCase_ : Dict =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : List[str] =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase_ : Optional[int] =outputs.hidden_states
UpperCamelCase_ : List[Any] =self.model_tester.num_encoder_blocks
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCamelCase_ , UpperCamelCase_ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Union[str, Any] =True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ : Union[str, Any] =True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCamelCase_ , UpperCamelCase_ : str =self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ : str =True
for model_class in self.all_model_classes:
if model_class in get_values(_lowerCamelCase ):
continue
UpperCamelCase_ : Union[str, Any] =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
UpperCamelCase_ : Tuple =self._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase )
UpperCamelCase_ : str =model(**_lowerCamelCase ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : List[Any] =SegformerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def A_ ( ):
UpperCamelCase_ : Tuple =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class a__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
UpperCamelCase_ : Tuple =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
UpperCamelCase_ : Dict =SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_lowerCamelCase )
UpperCamelCase_ : Dict =prepare_img()
UpperCamelCase_ : str =image_processor(images=_lowerCamelCase , return_tensors='pt' )
UpperCamelCase_ : int =encoded_inputs.pixel_values.to(_lowerCamelCase )
with torch.no_grad():
UpperCamelCase_ : Dict =model(_lowerCamelCase )
UpperCamelCase_ : Union[str, Any] =torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCamelCase_ : List[str] =torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
UpperCamelCase_ : int =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
UpperCamelCase_ : Any =SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(_lowerCamelCase )
UpperCamelCase_ : Any =prepare_img()
UpperCamelCase_ : Tuple =image_processor(images=_lowerCamelCase , return_tensors='pt' )
UpperCamelCase_ : Any =encoded_inputs.pixel_values.to(_lowerCamelCase )
with torch.no_grad():
UpperCamelCase_ : Any =model(_lowerCamelCase )
UpperCamelCase_ : Optional[int] =torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCamelCase_ : Any =torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _lowerCamelCase , atol=1E-1 ) )
@slow
def lowerCamelCase_ ( self :List[str] ):
'''simple docstring'''
UpperCamelCase_ : int =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
UpperCamelCase_ : int =SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
_lowerCamelCase )
UpperCamelCase_ : Any =prepare_img()
UpperCamelCase_ : int =image_processor(images=_lowerCamelCase , return_tensors='pt' )
UpperCamelCase_ : List[Any] =encoded_inputs.pixel_values.to(_lowerCamelCase )
with torch.no_grad():
UpperCamelCase_ : int =model(_lowerCamelCase )
UpperCamelCase_ : int =outputs.logits.detach().cpu()
UpperCamelCase_ : Any =image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase , target_sizes=[(500, 300)] )
UpperCamelCase_ : Any =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
UpperCamelCase_ : Optional[Any] =image_processor.post_process_semantic_segmentation(outputs=_lowerCamelCase )
UpperCamelCase_ : Any =torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _lowerCamelCase )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__SCREAMING_SNAKE_CASE = {
'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE = [
'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegaForCausalLM',
'MegaForMaskedLM',
'MegaForMultipleChoice',
'MegaForQuestionAnswering',
'MegaForSequenceClassification',
'MegaForTokenClassification',
'MegaModel',
'MegaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __A ( tf.keras.layers.Layer ):
def __init__( self : Any , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : List[str]=1 , lowerCamelCase : Optional[Any]=False , **lowerCamelCase : Dict ):
"""simple docstring"""
super().__init__(**lowerCamelCase )
__A : Tuple = vocab_size
__A : Optional[Any] = d_embed
__A : List[str] = d_proj
__A : Union[str, Any] = cutoffs + [vocab_size]
__A : int = [0] + self.cutoffs
__A : int = div_val
__A : Dict = self.cutoffs[0]
__A : List[str] = len(self.cutoffs ) - 1
__A : Tuple = self.shortlist_size + self.n_clusters
__A : str = keep_order
__A : Union[str, Any] = []
__A : int = []
def lowercase_( self : Optional[int] , lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
if self.n_clusters > 0:
__A : Union[str, Any] = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=lowerCamelCase , name="""cluster_weight""" )
__A : Dict = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=lowerCamelCase , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__A : Any = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"out_projs_._{i}" , )
self.out_projs.append(lowerCamelCase )
else:
self.out_projs.append(lowerCamelCase )
__A : int = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"out_layers_._{i}_._weight" , )
__A : Tuple = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__A : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__A : int = self.d_embed // (self.div_val**i)
__A : List[str] = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"out_projs_._{i}" )
self.out_projs.append(lowerCamelCase )
__A : Tuple = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"out_layers_._{i}_._weight" , )
__A : Any = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=lowerCamelCase , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
super().build(lowerCamelCase )
@staticmethod
def lowercase_( lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : List[Any]=None ):
"""simple docstring"""
__A : Any = x
if proj is not None:
__A : Tuple = tf.einsum("""ibd,ed->ibe""" , lowerCamelCase , lowerCamelCase )
return tf.einsum("""ibd,nd->ibn""" , lowerCamelCase , lowerCamelCase ) + b
@staticmethod
def lowercase_( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
__A : Tuple = shape_list(lowerCamelCase )
__A : Tuple = tf.range(lp_size[0] , dtype=target.dtype )
__A : Dict = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCamelCase , lowerCamelCase )
def lowercase_( self : int , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : Optional[int]=True , lowerCamelCase : Optional[int]=False ):
"""simple docstring"""
__A : Any = 0
if self.n_clusters == 0:
__A : Union[str, Any] = self._logit(lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__A : List[str] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCamelCase , logits=lowerCamelCase )
__A : str = tf.nn.log_softmax(lowerCamelCase , axis=-1 )
else:
__A : Tuple = shape_list(lowerCamelCase )
__A : Any = []
__A : Optional[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__A : Dict = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__A : Optional[Any] = (target >= l_idx) & (target < r_idx)
__A : List[Any] = tf.where(lowerCamelCase )
__A : Optional[int] = tf.boolean_mask(lowerCamelCase , lowerCamelCase ) - l_idx
if self.div_val == 1:
__A : List[str] = self.out_layers[0][0][l_idx:r_idx]
__A : Union[str, Any] = self.out_layers[0][1][l_idx:r_idx]
else:
__A : str = self.out_layers[i][0]
__A : Any = self.out_layers[i][1]
if i == 0:
__A : str = tf.concat([cur_W, self.cluster_weight] , 0 )
__A : int = tf.concat([cur_b, self.cluster_bias] , 0 )
__A : List[str] = self._logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , self.out_projs[0] )
__A : Dict = tf.nn.log_softmax(lowerCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__A : Tuple = tf.boolean_mask(lowerCamelCase , lowerCamelCase )
__A : Optional[int] = self._gather_logprob(lowerCamelCase , lowerCamelCase )
else:
__A : Union[str, Any] = self._logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , self.out_projs[i] )
__A : str = tf.nn.log_softmax(lowerCamelCase )
__A : Union[str, Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
__A : Tuple = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCamelCase )
if target is not None:
__A : Any = tf.boolean_mask(lowerCamelCase , lowerCamelCase )
__A : Optional[int] = tf.boolean_mask(lowerCamelCase , lowerCamelCase )
__A : Union[str, Any] = self._gather_logprob(lowerCamelCase , lowerCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCamelCase , -cur_logprob , shape_list(lowerCamelCase ) )
__A : Optional[int] = tf.concat(lowerCamelCase , axis=-1 )
if target is not None:
if return_mean:
__A : List[Any] = tf.reduce_mean(lowerCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCamelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowerCamelCase , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
A__ : Any =logging.get_logger(__name__)
@add_end_docstrings(_SCREAMING_SNAKE_CASE )
class __A ( _SCREAMING_SNAKE_CASE ):
def __init__( self : Union[str, Any] , *lowerCamelCase : int , **lowerCamelCase : Optional[int] ):
"""simple docstring"""
super().__init__(*lowerCamelCase , **lowerCamelCase )
self.check_model_type(lowerCamelCase )
def lowercase_( self : Any , lowerCamelCase : Dict=None , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : int=None , **lowerCamelCase : int ):
"""simple docstring"""
__A , __A : Tuple = {}, {}
if padding is not None:
__A : Any = padding
if truncation is not None:
__A : Optional[Any] = truncation
if top_k is not None:
__A : List[str] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Any , lowerCamelCase : Union["Image.Image", str] , lowerCamelCase : str = None , **lowerCamelCase : Tuple ):
"""simple docstring"""
if isinstance(lowerCamelCase , (Image.Image, str) ) and isinstance(lowerCamelCase , lowerCamelCase ):
__A : Tuple = {"""image""": image, """question""": question}
else:
__A : List[Any] = image
__A : Any = super().__call__(lowerCamelCase , **lowerCamelCase )
return results
def lowercase_( self : int , lowerCamelCase : Optional[int] , lowerCamelCase : Dict=False , lowerCamelCase : str=False ):
"""simple docstring"""
__A : List[str] = load_image(inputs["""image"""] )
__A : Optional[Any] = self.tokenizer(
inputs["""question"""] , return_tensors=self.framework , padding=lowerCamelCase , truncation=lowerCamelCase )
__A : Union[str, Any] = self.image_processor(images=lowerCamelCase , return_tensors=self.framework )
model_inputs.update(lowerCamelCase )
return model_inputs
def lowercase_( self : List[str] , lowerCamelCase : Tuple ):
"""simple docstring"""
__A : List[str] = self.model(**lowerCamelCase )
return model_outputs
def lowercase_( self : str , lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any]=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
__A : str = self.model.config.num_labels
if self.framework == "pt":
__A : Optional[Any] = model_outputs.logits.sigmoid()[0]
__A , __A : int = probs.topk(lowerCamelCase )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
__A : Optional[Any] = scores.tolist()
__A : Dict = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowerCamelCase , lowerCamelCase )]
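

# A minimal usage sketch, assuming the high-level `pipeline` factory; the ViLT
# checkpoint name and COCO image URL are illustrative placeholders, not values
# required by this module.
if __name__ == "__main__":
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    answers = vqa(
        image="http://images.cocodataset.org/val2017/000000039769.jpg",
        question="How many cats are there?",
        top_k=2,
    )
    print(answers)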
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
UpperCamelCase_ = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ) -> Union[str, Any]:
__UpperCAmelCase =bnb_quantization_config.load_in_abit
__UpperCAmelCase =bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
__UpperCAmelCase =[]
# custom device map
if isinstance(snake_case__ , snake_case__ ) and len(device_map.keys() ) > 1:
__UpperCAmelCase =[key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
__UpperCAmelCase =get_keys_to_not_convert(snake_case__ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(snake_case__ )
__UpperCAmelCase =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
__UpperCAmelCase =[]
__UpperCAmelCase =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(snake_case__ )
# compatibility with peft
__UpperCAmelCase =load_in_abit
__UpperCAmelCase =load_in_abit
__UpperCAmelCase =get_parameter_device(snake_case__ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
__UpperCAmelCase =replace_with_bnb_layers(snake_case__ , snake_case__ , modules_to_not_convert=snake_case__ )
# convert param to the right dtype
__UpperCAmelCase =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
__UpperCAmelCase =name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
__UpperCAmelCase =getattr(snake_case__ , snake_case__ , snake_case__ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(snake_case__ ):
param.to(snake_case__ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
__UpperCAmelCase =replace_with_bnb_layers(
snake_case__ , snake_case__ , modules_to_not_convert=snake_case__ )
__UpperCAmelCase =get_quantized_model_device_map(
snake_case__ , snake_case__ , snake_case__ , max_memory=snake_case__ , no_split_module_classes=snake_case__ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
__UpperCAmelCase =True
__UpperCAmelCase =any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
snake_case__ , snake_case__ , snake_case__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=snake_case__ , offload_state_dict=snake_case__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(snake_case__ , device_map=snake_case__ , offload_dir=snake_case__ )
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None ) -> Union[str, Any]:
if device_map is None:
if torch.cuda.is_available():
__UpperCAmelCase ={'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(snake_case__ , snake_case__ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
__UpperCAmelCase ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
__UpperCAmelCase ={}
__UpperCAmelCase =special_dtypes
__UpperCAmelCase =no_split_module_classes
__UpperCAmelCase =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
__UpperCAmelCase =get_balanced_memory(
snake_case__ , low_zero=(device_map == '''balanced_low_0''') , max_memory=snake_case__ , **snake_case__ , )
__UpperCAmelCase =max_memory
__UpperCAmelCase =infer_auto_device_map(snake_case__ , **snake_case__ )
if isinstance(snake_case__ , snake_case__ ):
# check if don't have any quantized module on the cpu
__UpperCAmelCase =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
__UpperCAmelCase ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ) -> List[Any]:
if modules_to_not_convert is None:
__UpperCAmelCase =[]
__UpperCAmelCase , __UpperCAmelCase =_replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ) -> List[str]:
__UpperCAmelCase =False
for name, module in model.named_children():
if current_key_name is None:
__UpperCAmelCase =[]
current_key_name.append(snake_case__ )
if isinstance(snake_case__ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
__UpperCAmelCase ='''.'''.join(snake_case__ )
__UpperCAmelCase =True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
__UpperCAmelCase =False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
__UpperCAmelCase =bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=snake_case__ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
__UpperCAmelCase =bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
__UpperCAmelCase =module.weight.data
if module.bias is not None:
__UpperCAmelCase =module.bias.data
bnb_module.requires_grad_(snake_case__ )
setattr(snake_case__ , snake_case__ , snake_case__ )
__UpperCAmelCase =True
if len(list(module.children() ) ) > 0:
__UpperCAmelCase , __UpperCAmelCase =_replace_with_bnb_layers(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
__UpperCAmelCase =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> int:
# Create a copy of the model
with init_empty_weights():
__UpperCAmelCase =deepcopy(snake_case__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
__UpperCAmelCase =find_tied_parameters(snake_case__ )
# For compatibility with Accelerate < 0.18
if isinstance(snake_case__ , snake_case__ ):
__UpperCAmelCase =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
__UpperCAmelCase =sum(snake_case__ , [] )
__UpperCAmelCase =len(snake_case__ ) > 0
# Check if it is a base model
__UpperCAmelCase =False
if hasattr(snake_case__ , '''base_model_prefix''' ):
__UpperCAmelCase =not hasattr(snake_case__ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
__UpperCAmelCase =list(model.named_children() )
__UpperCAmelCase =[list_modules[-1][0]]
# add last module together with tied weights
__UpperCAmelCase =set(snake_case__ ) - set(snake_case__ )
__UpperCAmelCase =list(set(snake_case__ ) ) + list(snake_case__ )
# remove ".weight" from the keys
__UpperCAmelCase =['''.weight''', '''.bias''']
__UpperCAmelCase =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
__UpperCAmelCase =name.replace(snake_case__ , '''''' )
filtered_module_names.append(snake_case__ )
return filtered_module_names
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> List[str]:
for m in model.modules():
if isinstance(snake_case__ , bnb.nn.Linearabit ):
return True
return False
def SCREAMING_SNAKE_CASE ( snake_case__ ) -> Tuple:
return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> str:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(snake_case__ , snake_case__ , 0 , dtype=snake_case__ , value=snake_case__ )
__UpperCAmelCase =param_name
__UpperCAmelCase =model
if "." in tensor_name:
__UpperCAmelCase =tensor_name.split('''.''' )
for split in splits[:-1]:
__UpperCAmelCase =getattr(snake_case__ , snake_case__ )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
__UpperCAmelCase =new_module
__UpperCAmelCase =splits[-1]
# offload weights
__UpperCAmelCase =False
offload_weight(module._parameters[tensor_name] , snake_case__ , snake_case__ , index=snake_case__ )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , snake_case__ , index=snake_case__ , )
else:
offload_weight(snake_case__ , snake_case__ , snake_case__ , index=snake_case__ )
offload_weight(snake_case__ , param_name.replace('''weight''' , '''SCB''' ) , snake_case__ , index=snake_case__ )
set_module_tensor_to_device(snake_case__ , snake_case__ , '''meta''' , dtype=snake_case__ , value=torch.empty(*param.size() ) )
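

# A minimal usage sketch, assuming accelerate's public BnbQuantizationConfig and
# load_and_quantize_model entry points; the checkpoint name and weights path are
# placeholders, not values defined in this module.
if __name__ == "__main__":
    from accelerate import init_empty_weights
    from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
    from transformers import AutoConfig, AutoModelForCausalLM

    config = AutoConfig.from_pretrained("facebook/opt-350m")
    with init_empty_weights():
        empty_model = AutoModelForCausalLM.from_config(config)

    bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
    quantized_model = load_and_quantize_model(
        empty_model,
        bnb_quantization_config=bnb_config,
        weights_location="path/to/opt-350m-weights",  # placeholder path
        device_map="auto",
    )
    print(quantized_model.__class__.__name__, "quantized to 8-bit")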
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _SCREAMING_SNAKE_CASE :
a_ : Any = BlenderbotSmallConfig
a_ : List[str] = {}
a_ : Any = '''gelu'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=1_3 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=2 , UpperCAmelCase=4 , UpperCAmelCase=3_7 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=2_0 , UpperCAmelCase=2 , UpperCAmelCase=1 , UpperCAmelCase=0 , ):
'''simple docstring'''
__UpperCAmelCase =parent
__UpperCAmelCase =batch_size
__UpperCAmelCase =seq_length
__UpperCAmelCase =is_training
__UpperCAmelCase =use_labels
__UpperCAmelCase =vocab_size
__UpperCAmelCase =hidden_size
__UpperCAmelCase =num_hidden_layers
__UpperCAmelCase =num_attention_heads
__UpperCAmelCase =intermediate_size
__UpperCAmelCase =hidden_dropout_prob
__UpperCAmelCase =attention_probs_dropout_prob
__UpperCAmelCase =max_position_embeddings
__UpperCAmelCase =eos_token_id
__UpperCAmelCase =pad_token_id
__UpperCAmelCase =bos_token_id
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
__UpperCAmelCase =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
__UpperCAmelCase =tf.concat([input_ids, eos_tensor] , axis=1)
__UpperCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCAmelCase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCAmelCase =prepare_blenderbot_small_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase)
return config, inputs_dict
def A__ (self , UpperCAmelCase , UpperCAmelCase):
'''simple docstring'''
__UpperCAmelCase =TFBlenderbotSmallModel(config=UpperCAmelCase).get_decoder()
__UpperCAmelCase =inputs_dict['''input_ids''']
__UpperCAmelCase =input_ids[:1, :]
__UpperCAmelCase =inputs_dict['''attention_mask'''][:1, :]
__UpperCAmelCase =inputs_dict['''head_mask''']
__UpperCAmelCase =1
# first forward pass
__UpperCAmelCase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , head_mask=UpperCAmelCase , use_cache=UpperCAmelCase)
__UpperCAmelCase , __UpperCAmelCase =outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
__UpperCAmelCase =ids_tensor((self.batch_size, 3) , config.vocab_size)
__UpperCAmelCase =tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and attention_mask
__UpperCAmelCase =tf.concat([input_ids, next_tokens] , axis=-1)
__UpperCAmelCase =tf.concat([attention_mask, next_attn_mask] , axis=-1)
__UpperCAmelCase =model(UpperCAmelCase , attention_mask=UpperCAmelCase)[0]
__UpperCAmelCase =model(UpperCAmelCase , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
__UpperCAmelCase =int(ids_tensor((1,) , output_from_past.shape[-1]))
__UpperCAmelCase =output_from_no_past[:, -3:, random_slice_idx]
__UpperCAmelCase =output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , rtol=1e-3)
def SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , snake_case__=None , ) -> List[str]:
if attention_mask is None:
__UpperCAmelCase =tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__UpperCAmelCase =tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__UpperCAmelCase =tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__UpperCAmelCase =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__UpperCAmelCase =tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
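# Editor's note: a minimal sketch (not part of the original test) of how the helper
# above builds a default attention mask: positions equal to pad_token_id are masked out.
#   ids = tf.constant([[5, 6, 0, 0]])                    # assume pad_token_id == 0
#   mask = tf.cast(tf.math.not_equal(ids, 0), tf.int8)   # -> [[1, 1, 0, 0]]
# The decoder mask additionally forces the first decoder position to 1.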
@require_tf
class _SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a_ : List[Any] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
a_ : Optional[Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
a_ : Tuple = (
{
'''conversational''': TFBlenderbotSmallForConditionalGeneration,
'''feature-extraction''': TFBlenderbotSmallModel,
'''summarization''': TFBlenderbotSmallForConditionalGeneration,
'''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
'''translation''': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
a_ : Tuple = True
a_ : List[str] = False
a_ : Union[str, Any] = False
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =TFBlenderbotSmallModelTester(self)
__UpperCAmelCase =ConfigTester(self , config_class=UpperCAmelCase)
def A__ (self):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase)
@require_tokenizers
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
a_ : str = [
'''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
''' i\'m going to throw up.\nand why is that?'''
]
a_ : str = '''facebook/blenderbot_small-90M'''
@cached_property
def A__ (self):
'''simple docstring'''
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
@cached_property
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def A__ (self):
'''simple docstring'''
__UpperCAmelCase =self.tokenizer(self.src_text , return_tensors='''tf''')
__UpperCAmelCase =self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCAmelCase , )
__UpperCAmelCase =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCAmelCase)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 132
| 1
|
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
UpperCamelCase_ = None
UpperCamelCase_ = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
UpperCamelCase_ = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class a_ :
UpperCamelCase__ : bool =True
UpperCamelCase__ : Optional[str] =None
# Automatically constructed
UpperCamelCase__ : ClassVar[str] ="PIL.Image.Image"
UpperCamelCase__ : ClassVar[Any] =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
UpperCamelCase__ : str =field(default="Image" , init=lowercase__ , repr=lowercase__ )
def __call__( self :Tuple) -> List[Any]:
return self.pa_type
def __a ( self :Union[str, Any] , _lowercase :Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> Any:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''')
if isinstance(__lowercase , __lowercase):
UpperCAmelCase_ = np.array(__lowercase)
if isinstance(__lowercase , __lowercase):
return {"path": value, "bytes": None}
elif isinstance(__lowercase , __lowercase):
return {"path": None, "bytes": value}
elif isinstance(__lowercase , np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__lowercase)
elif isinstance(__lowercase , PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__lowercase)
elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''')}
elif value.get('''bytes''') is not None or value.get('''path''') is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
else:
raise ValueError(
f"An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.")
def __a ( self :Optional[int] , _lowercase :dict , _lowercase :Optional[Any]=None) -> Optional[int]:
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''')
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''')
if token_per_repo_id is None:
UpperCAmelCase_ = {}
UpperCAmelCase_ , UpperCAmelCase_ = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of \'path\' or \'bytes\' but both are None in {value}.")
else:
if is_local_path(__lowercase):
UpperCAmelCase_ = PIL.Image.open(__lowercase)
else:
UpperCAmelCase_ = path.split('''::''')[-1]
try:
UpperCAmelCase_ = string_to_dict(__lowercase , config.HUB_DATASETS_URL)['''repo_id''']
UpperCAmelCase_ = token_per_repo_id.get(__lowercase)
except ValueError:
UpperCAmelCase_ = None
with xopen(__lowercase , '''rb''' , use_auth_token=__lowercase) as f:
UpperCAmelCase_ = BytesIO(f.read())
UpperCAmelCase_ = PIL.Image.open(bytes_)
else:
UpperCAmelCase_ = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
return image
def __a ( self :str) -> str:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary'''),
"path": Value('''string'''),
}
)
def __a ( self :str , _lowercase :Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> Optional[int]:
if pa.types.is_string(storage.type):
UpperCAmelCase_ = pa.array([None] * len(__lowercase) , type=pa.binary())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
UpperCAmelCase_ = pa.array([None] * len(__lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('''bytes''') >= 0:
UpperCAmelCase_ = storage.field('''bytes''')
else:
UpperCAmelCase_ = pa.array([None] * len(__lowercase) , type=pa.binary())
if storage.type.get_field_index('''path''') >= 0:
UpperCAmelCase_ = storage.field('''path''')
else:
UpperCAmelCase_ = pa.array([None] * len(__lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null())
elif pa.types.is_list(storage.type):
UpperCAmelCase_ = pa.array(
[encode_np_array(np.array(__lowercase))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
UpperCAmelCase_ = pa.array([None] * len(__lowercase) , type=pa.string())
UpperCAmelCase_ = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(__lowercase , self.pa_type)
def __a ( self :Optional[Any] , _lowercase :pa.StructArray) -> Optional[Any]:
@no_op_if_value_is_null
def path_to_bytes(_lowercase :List[Any]):
with xopen(__lowercase , '''rb''') as f:
UpperCAmelCase_ = f.read()
return bytes_
UpperCAmelCase_ = pa.array(
[
(path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
UpperCAmelCase_ = pa.array(
[os.path.basename(__lowercase) if path is not None else None for path in storage.field('''path''').to_pylist()] , type=pa.string() , )
UpperCAmelCase_ = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null())
return array_cast(__lowercase , self.pa_type)
def A ( ) -> List[Any]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase_ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def A ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase_ = image.format
else:
UpperCAmelCase_ = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(SCREAMING_SNAKE_CASE_ , format=SCREAMING_SNAKE_CASE_ )
return buffer.getvalue()
def A ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
if hasattr(SCREAMING_SNAKE_CASE_ , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(SCREAMING_SNAKE_CASE_ )}
def A ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
UpperCAmelCase_ = array.dtype
UpperCAmelCase_ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
UpperCAmelCase_ = dtype.kind
UpperCAmelCase_ = dtype.itemsize
UpperCAmelCase_ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase_ = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
if dtype is not dest_dtype:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase_ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase_ = dtype_byteorder + dtype_kind + str(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_ = np.dtype(SCREAMING_SNAKE_CASE_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
UpperCAmelCase_ = PIL.Image.fromarray(array.astype(SCREAMING_SNAKE_CASE_ ) )
return {"path": None, "bytes": image_to_bytes(SCREAMING_SNAKE_CASE_ )}
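# Editor's note: worked example of the downcasting loop above, assuming a little-endian
# machine. A 2-D int64 array has dtype "<i8" (kind "i", itemsize 8), which is not a
# valid Pillow dtype; the loop first tries "<i8" (invalid), halves the itemsize and
# tries "<i4" (valid), emits the downcast warning, and encodes the array as int32
# before building the PIL image.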
def A ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCAmelCase_ , UpperCAmelCase_ = first_non_null_value(SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
UpperCAmelCase_ = no_op_if_value_is_null(SCREAMING_SNAKE_CASE_ )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE_ ) for obj in objs]
elif isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ):
UpperCAmelCase_ = no_op_if_value_is_null(SCREAMING_SNAKE_CASE_ )
return [obj_to_image_dict_func(SCREAMING_SNAKE_CASE_ ) for obj in objs]
else:
return objs
else:
return objs
| 715
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def A ( __UpperCAmelCase=None ) -> List[str]:
'''simple docstring'''
if subparsers is not None:
UpperCAmelCase_ = subparsers.add_parser('''test''' )
else:
UpperCAmelCase_ = argparse.ArgumentParser('''Accelerate test command''' )
parser.add_argument(
'''--config_file''' , default=__UpperCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=__UpperCAmelCase )
return parser
def A ( __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
if args.config_file is None:
UpperCAmelCase_ = script_name
else:
UpperCAmelCase_ = f"--config_file={args.config_file} {script_name}"
UpperCAmelCase_ = ['''accelerate-launch'''] + test_args.split()
UpperCAmelCase_ = execute_subprocess_async(__UpperCAmelCase , env=os.environ.copy() )
if result.returncode == 0:
print('''Test is a success! You are ready for your distributed training!''' )
def A ( ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = test_command_parser()
UpperCAmelCase_ = parser.parse_args()
test_command(__UpperCAmelCase )
if __name__ == "__main__":
main()
| 561
| 0
|
"""simple docstring"""
from collections.abc import Generator
from math import sin
def __magic_name__ ( __snake_case : bytes ) -> bytes:
if len(__snake_case ) != 32:
raise ValueError("Input must be of length 32" )
lowercase : List[Any] = B""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
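# Editor's note: worked example for the reordering above, assuming the helper is named
# to_little_endian (hypothetical name):
#   to_little_endian(b"1234567890abcdfghijklmnopqrstuvw")
#   == b"pqrstuvwhijklmno90abcdfg12345678"
# i.e. the four 8-byte words are emitted in reverse order.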
def __magic_name__ ( __snake_case : int ) -> bytes:
if i < 0:
raise ValueError("Input must be non-negative" )
lowercase : Dict = format(__snake_case , "08x" )[-8:]
lowercase : Union[str, Any] = B""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8" )
return little_endian_hex
def __magic_name__ ( __snake_case : bytes ) -> bytes:
lowercase : Tuple = B""
for char in message:
bit_string += format(__snake_case , "08b" ).encode("utf-8" )
lowercase : List[str] = format(len(__snake_case ) , "064b" ).encode("utf-8" )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(__snake_case ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __magic_name__ ( __snake_case : bytes ) -> Generator[list[int], None, None]:
if len(__snake_case ) % 512 != 0:
raise ValueError("Input must have length that's a multiple of 512" )
for pos in range(0 , len(__snake_case ) , 512 ):
lowercase : Tuple = bit_string[pos : pos + 512]
lowercase : Tuple = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __magic_name__ ( __snake_case : int ) -> int:
if i < 0:
raise ValueError("Input must be non-negative" )
lowercase : Union[str, Any] = format(__snake_case , "032b" )
lowercase : List[Any] = ""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(__snake_case , 2 )
def __magic_name__ ( __snake_case : int , __snake_case : int ) -> int:
return (a + b) % 2**32
def __magic_name__ ( __snake_case : int , __snake_case : int ) -> int:
if i < 0:
raise ValueError("Input must be non-negative" )
if shift < 0:
raise ValueError("Shift must be non-negative" )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
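# Editor's note: worked example for the 32-bit left rotation above: rotating
# 0x80000000 left by 1 wraps the high bit around to the low bit,
#   ((0x80000000 << 1) ^ (0x80000000 >> 31)) % 2**32 == 1
# and rotating 1 left by 31 gives back 0x80000000.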
def __magic_name__ ( __snake_case : bytes ) -> bytes:
lowercase : Dict = preprocess(__snake_case )
lowercase : Dict = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
lowercase : str = 0X6745_2301
lowercase : Optional[int] = 0Xefcd_ab89
lowercase : Dict = 0X98ba_dcfe
lowercase : Tuple = 0X1032_5476
lowercase : Dict = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(__snake_case ):
lowercase : Tuple = aa
lowercase : Any = ba
lowercase : Optional[int] = ca
lowercase : Tuple = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
lowercase : str = d ^ (b & (c ^ d))
lowercase : str = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
lowercase : Optional[Any] = c ^ (d & (b ^ c))
lowercase : str = (5 * i + 1) % 16
elif i <= 47:
lowercase : int = b ^ c ^ d
lowercase : List[str] = (3 * i + 5) % 16
else:
lowercase : Tuple = c ^ (b | not_aa(__snake_case ))
lowercase : List[Any] = (7 * i) % 16
lowercase : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**32
lowercase : int = d
lowercase : int = c
lowercase : Dict = b
lowercase : str = sum_aa(__snake_case , left_rotate_aa(__snake_case , shift_amounts[i] ) )
# Add hashed chunk to running total
lowercase : Tuple = sum_aa(__snake_case , __snake_case )
lowercase : List[str] = sum_aa(__snake_case , __snake_case )
lowercase : str = sum_aa(__snake_case , __snake_case )
lowercase : Any = sum_aa(__snake_case , __snake_case )
lowercase : Dict = reformat_hex(__snake_case ) + reformat_hex(__snake_case ) + reformat_hex(__snake_case ) + reformat_hex(__snake_case )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
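# Editor's note: quick sanity check for the digest function above (assuming it is
# exposed as md5_me, a hypothetical name): the MD5 of the empty message is
#   md5_me(b"") == b"d41d8cf98f00b204e9800998ecf8427e"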
| 361
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( a_ ):
__lowerCAmelCase = (DDPMScheduler,)
def __magic_name__ ( self , **_a ):
lowercase : Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**_a )
return config
def __magic_name__ ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __magic_name__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __magic_name__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def __magic_name__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def __magic_name__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __magic_name__ ( self ):
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def __magic_name__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __magic_name__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_a )
def __magic_name__ ( self ):
lowercase : Union[str, Any] = self.scheduler_classes[0]
lowercase : Any = self.get_scheduler_config()
lowercase : Dict = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def __magic_name__ ( self ):
lowercase : Optional[int] = self.scheduler_classes[0]
lowercase : Tuple = self.get_scheduler_config()
lowercase : Dict = scheduler_class(**_a )
lowercase : Dict = len(_a )
lowercase : str = self.dummy_model()
lowercase : Optional[int] = self.dummy_sample_deter
lowercase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
lowercase : Optional[Any] = model(_a , _a )
# 2. predict previous mean of sample x_t-1
lowercase : str = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase : List[str] = pred_prev_sample
lowercase : Dict = torch.sum(torch.abs(_a ) )
lowercase : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def __magic_name__ ( self ):
lowercase : Optional[int] = self.scheduler_classes[0]
lowercase : Any = self.get_scheduler_config(prediction_type="v_prediction" )
lowercase : int = scheduler_class(**_a )
lowercase : str = len(_a )
lowercase : Optional[int] = self.dummy_model()
lowercase : List[str] = self.dummy_sample_deter
lowercase : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
lowercase : Union[str, Any] = model(_a , _a )
# 2. predict previous mean of sample x_t-1
lowercase : Optional[Any] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase : Dict = pred_prev_sample
lowercase : str = torch.sum(torch.abs(_a ) )
lowercase : Tuple = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def __magic_name__ ( self ):
lowercase : List[Any] = self.scheduler_classes[0]
lowercase : Tuple = self.get_scheduler_config()
lowercase : Tuple = scheduler_class(**_a )
lowercase : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_a )
lowercase : Union[str, Any] = scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
lowercase : Any = -1
else:
lowercase : Union[str, Any] = timesteps[i + 1]
lowercase : Optional[int] = scheduler.previous_timestep(_a )
lowercase : Union[str, Any] = prev_t.item()
self.assertEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : str = self.scheduler_classes[0]
lowercase : List[str] = self.get_scheduler_config()
lowercase : List[Any] = scheduler_class(**_a )
lowercase : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_a , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=_a )
def __magic_name__ ( self ):
lowercase : Dict = self.scheduler_classes[0]
lowercase : Union[str, Any] = self.get_scheduler_config()
lowercase : Any = scheduler_class(**_a )
lowercase : int = [100, 87, 50, 1, 0]
lowercase : Any = len(_a )
with self.assertRaises(_a , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def __magic_name__ ( self ):
lowercase : str = self.scheduler_classes[0]
lowercase : Tuple = self.get_scheduler_config()
lowercase : Optional[int] = scheduler_class(**_a )
lowercase : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg=f"`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=_a )
| 361
| 1
|
from __future__ import annotations
import math
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Any = u
for i in range(1 , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = temp * (u - i)
return temp
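# Editor's note: worked example of the u-term above: for u = 1.5 and n = 3 the product
# is u * (u - 1) * (u - 2) = 1.5 * 0.5 * (-0.5) = -0.375, the numerator of the
# corresponding Newton forward-difference coefficient before dividing by n!.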
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : str = int(input('''enter the numbers of values: ''' ) )
__SCREAMING_SNAKE_CASE : list[list[float]] = []
for _ in range(lowercase__ ):
y.append([] )
for i in range(lowercase__ ):
for j in range(lowercase__ ):
y[i].append(lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = 0
print('''enter the values of parameters in a list: ''' )
__SCREAMING_SNAKE_CASE : int = list(map(lowercase__ , input().split() ) )
print('''enter the values of corresponding parameters: ''' )
for i in range(lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = float(input() )
__SCREAMING_SNAKE_CASE : List[Any] = int(input('''enter the value to interpolate: ''' ) )
__SCREAMING_SNAKE_CASE : List[str] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , lowercase__ ):
for j in range(n - i ):
__SCREAMING_SNAKE_CASE : str = y[j + 1][i - 1] - y[j][i - 1]
__SCREAMING_SNAKE_CASE : List[str] = y[0][0]
for i in range(1 , lowercase__ ):
summ += (ucal(lowercase__ , lowercase__ ) * y[0][i]) / math.factorial(lowercase__ )
print(F'''the value at {value} is {summ}''' )
if __name__ == "__main__":
main()
| 260
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> Optional[int]:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCAmelCase__ ) for s in shape] )}.npy'''
def __magic_name__( self :List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :Any=(4, 4, 64, 64) , lowerCAmelCase__ :List[Any]=False ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase__ , lowerCAmelCase__ ) ) , dtype=lowerCAmelCase__ )
return image
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :int="CompVis/stable-diffusion-v1-4" ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Optional[int] = '''bf16''' if fpaa else None
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = FlaxUNetaDConditionModel.from_pretrained(
lowerCAmelCase__ , subfolder='''unet''' , dtype=lowerCAmelCase__ , revision=lowerCAmelCase__ )
return model, params
def __magic_name__( self :Any , lowerCAmelCase__ :str=0 , lowerCAmelCase__ :Optional[int]=(4, 77, 768) , lowerCAmelCase__ :str=False ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Any = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase__ , lowerCAmelCase__ ) ) , dtype=lowerCAmelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def __magic_name__( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self.get_latents(lowerCAmelCase__ , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = self.get_encoder_hidden_states(lowerCAmelCase__ , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = model.apply(
{'''params''': params} , lowerCAmelCase__ , jnp.array(lowerCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase__ , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array(lowerCAmelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_latents(lowerCAmelCase__ , shape=(4, 4, 96, 96) , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_encoder_hidden_states(lowerCAmelCase__ , shape=(4, 77, 1_024) , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model.apply(
{'''params''': params} , lowerCAmelCase__ , jnp.array(lowerCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase__ , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : str = jnp.array(lowerCAmelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 )
| 260
| 1
|
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __a ( A__ ) -> str:
lowerCAmelCase = args.pruning_method
lowerCAmelCase = args.threshold
lowerCAmelCase = args.model_name_or_path.rstrip("/" )
lowerCAmelCase = args.target_model_path
print(f"Load fine-pruned model from {model_name_or_path}" )
lowerCAmelCase = torch.load(os.path.join(A__ , "pytorch_model.bin" ) )
lowerCAmelCase = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase = tensor
print(f"Copied layer {name}" )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase = tensor
print(f"Copied layer {name}" )
elif "bias" in name:
lowerCAmelCase = tensor
print(f"Copied layer {name}" )
else:
if pruning_method == "magnitude":
lowerCAmelCase = MagnitudeBinarizer.apply(inputs=A__ , threshold=A__ )
lowerCAmelCase = tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[f"{prefix_}mask_scores"]
lowerCAmelCase = TopKBinarizer.apply(A__ , A__ )
lowerCAmelCase = tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[f"{prefix_}mask_scores"]
lowerCAmelCase = ThresholdBinarizer.apply(A__ , A__ , A__ )
lowerCAmelCase = tensor * mask
print(f"Pruned layer {name}" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase = name[:-6]
lowerCAmelCase = model[f"{prefix_}mask_scores"]
lowerCAmelCase , lowerCAmelCase = -0.1, 1.1
lowerCAmelCase = torch.sigmoid(A__ )
lowerCAmelCase = s * (r - l) + l
lowerCAmelCase = s_bar.clamp(min=0.0 , max=1.0 )
lowerCAmelCase = tensor * mask
print(f"Pruned layer {name}" )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
lowerCAmelCase = os.path.join(
os.path.dirname(A__ ) , f"bertarized_{os.path.basename(A__ )}" )
if not os.path.isdir(A__ ):
shutil.copytree(A__ , A__ )
print(f"\nCreated folder {target_model_path}" )
torch.save(A__ , os.path.join(A__ , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
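# Editor's note: worked example for the "l0" branch above (stretch-and-clamp with
# l = -0.1, r = 1.1): a mask score of 0 gives sigmoid(0) * 1.2 - 0.1 = 0.5, so the
# weight is half-kept, while a strongly negative score saturates below 0 and is
# clamped to 0, pruning that weight entirely.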
if __name__ == "__main__":
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
lowercase : str = parser.parse_args()
main(args)
| 649
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowercase : int = 'examples/'
lowercase : int = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
lowercase : Union[str, Any] = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
lowercase : Union[str, Any] = 'README.md'
def __a ( A__ , A__ , A__ ) -> Dict:
with open(A__ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase , lowerCAmelCase = REPLACE_PATTERNS[pattern]
lowerCAmelCase = replace.replace("VERSION" , A__ )
lowerCAmelCase = re_pattern.sub(A__ , A__ )
with open(A__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(A__ )
def __a ( A__ ) -> List[Any]:
for folder, directories, fnames in os.walk(A__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(A__ , A__ ) , A__ , pattern="examples" )
def __a ( A__ , A__=False ) -> Tuple:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(A__ , A__ , A__ )
if not patch:
update_version_in_examples(A__ )
def __a ( ) -> List[str]:
lowerCAmelCase = "🤗 Transformers currently provides the following architectures"
lowerCAmelCase = "1. Want to contribute a new model?"
with open(A__ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCAmelCase = f.readlines()
# Find the start of the list.
lowerCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
lowerCAmelCase = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
index += 1
with open(A__ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(A__ )
def __a ( ) -> Optional[Any]:
with open(REPLACE_FILES["init"] , "r" ) as f:
lowerCAmelCase = f.read()
lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(A__ ).groups()[0]
return packaging.version.parse(A__ )
def __a ( A__=False ) -> Optional[int]:
lowerCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
lowerCAmelCase = default_version.base_version
elif patch:
lowerCAmelCase = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
lowerCAmelCase = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
lowerCAmelCase = input(f"Which version are you releasing? [{default_version}]" )
if len(A__ ) == 0:
lowerCAmelCase = default_version
print(f"Updating version to {version}." )
global_version_update(A__ , patch=A__ )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def __a ( ) -> Tuple:
lowerCAmelCase = get_version()
lowerCAmelCase = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
lowerCAmelCase = current_version.base_version
# Check with the user we got that right.
lowerCAmelCase = input(f"Which version are we developing now? [{dev_version}]" )
if len(A__ ) == 0:
lowerCAmelCase = dev_version
print(f"Updating version to {version}." )
global_version_update(A__ )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
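# Editor's note: worked example of the version flows above: with __init__ at
# "4.29.0.dev0", pre_release_work() proposes releasing "4.29.0"; a patch release
# started from "4.29.0" proposes "4.29.1"; post_release_work() then proposes moving
# the main branch to "4.30.0.dev0".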
if __name__ == "__main__":
lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
lowercase : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 649
| 1
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_A : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( _UpperCAmelCase ,_UpperCAmelCase ):
@register_to_config
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None ):
super().__init__()
__lowerCamelCase: str = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase: Dict = torch.zeros(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
__lowerCamelCase: Optional[Any] = None
__lowerCamelCase: List[Any] = torch.nn.Parameter(SCREAMING_SNAKE_CASE_ )
class a ( _UpperCAmelCase ):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : TransformeraDModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self : int , SCREAMING_SNAKE_CASE_ : VQModel , SCREAMING_SNAKE_CASE_ : CLIPTextModel , SCREAMING_SNAKE_CASE_ : CLIPTokenizer , SCREAMING_SNAKE_CASE_ : TransformeraDModel , SCREAMING_SNAKE_CASE_ : VQDiffusionScheduler , SCREAMING_SNAKE_CASE_ : LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=SCREAMING_SNAKE_CASE_ , transformer=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , learned_classifier_free_sampling_embeddings=SCREAMING_SNAKE_CASE_ , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] ):
__lowerCamelCase: Dict = len(SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else 1
# get prompt text embeddings
__lowerCamelCase: str = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase: List[str] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase: int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase: int = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase: Any = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase: str = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase: Union[str, Any] = prompt_embeds.repeat_interleave(SCREAMING_SNAKE_CASE_ , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase: Dict = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase: str = negative_prompt_embeds.unsqueeze(0 ).repeat(SCREAMING_SNAKE_CASE_ , 1 , 1 )
else:
__lowerCamelCase: Union[str, Any] = [""""""] * batch_size
__lowerCamelCase: int = text_input_ids.shape[-1]
__lowerCamelCase: Any = self.tokenizer(
SCREAMING_SNAKE_CASE_ , padding="""max_length""" , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
__lowerCamelCase: Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase: Union[str, Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=SCREAMING_SNAKE_CASE_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase: Tuple = negative_prompt_embeds.shape[1]
__lowerCamelCase: Optional[Any] = negative_prompt_embeds.repeat(1 , SCREAMING_SNAKE_CASE_ , 1 )
__lowerCamelCase: List[str] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , SCREAMING_SNAKE_CASE_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase: List[str] = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, List[str]] , SCREAMING_SNAKE_CASE_ : int = 100 , SCREAMING_SNAKE_CASE_ : float = 5.0 , SCREAMING_SNAKE_CASE_ : float = 1.0 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE_ : int = 1 , ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase: Optional[Any] = 1
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase: Dict = len(SCREAMING_SNAKE_CASE_ )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE_ )}' )
__lowerCamelCase: List[Any] = batch_size * num_images_per_prompt
__lowerCamelCase: int = guidance_scale > 1.0
__lowerCamelCase: str = self._encode_prompt(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(SCREAMING_SNAKE_CASE_ )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase: Dict = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase: Optional[Any] = self.transformer.num_vector_embeds - 1
__lowerCamelCase: str = torch.full(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
    """Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0,"""
    F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase: Union[str, Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ , device=self.device )
__lowerCamelCase: int = self.scheduler.timesteps.to(self.device )
__lowerCamelCase: Union[str, Any] = latents
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE_ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase: Optional[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase: str = self.transformer(SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ ).sample
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase: Any = model_output.chunk(2 )
__lowerCamelCase: int = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(SCREAMING_SNAKE_CASE_ , dim=1 , keepdim=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: str = self.truncate(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase: Tuple = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase: List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , sample=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: int = self.vqvae.config.vq_embed_dim
__lowerCamelCase: Union[str, Any] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase: Optional[Any] = self.vqvae.quantize.get_codebook_entry(SCREAMING_SNAKE_CASE_ , shape=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[str] = self.vqvae.decode(SCREAMING_SNAKE_CASE_ , force_not_quantize=SCREAMING_SNAKE_CASE_ ).sample
__lowerCamelCase: List[str] = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase: str = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase: Optional[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float ):
__lowerCamelCase , __lowerCamelCase: List[Any] = torch.sort(SCREAMING_SNAKE_CASE_ , 1 , descending=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[Any] = torch.exp(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase: Dict = torch.full_like(keep_mask[:, 0:1, :] , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase: Dict = keep_mask[:, :-1, :]
__lowerCamelCase: Optional[Any] = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase: Any = log_p_x_0.clone()
__lowerCamelCase: Tuple = -torch.inf # -inf = log(0)
return rv
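# Editor's note: worked example of the truncation mask above: for sorted per-class
# probabilities [0.5, 0.3, 0.2] and truncation_rate = 0.6 the cumulative sums are
# [0.5, 0.8, 1.0], so `cumsum < rate` keeps only the first entry; prepending an
# all-True column and dropping the last one shifts this to [True, True, False],
# i.e. the top-2 classes survive and all other classes get log-probability -inf
# before the scheduler samples from the truncated distribution.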
| 189
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a ( _UpperCAmelCase ):
UpperCAmelCase__ : Dict = "Speech2TextFeatureExtractor"
UpperCAmelCase__ : str = "Speech2TextTokenizer"
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Any = self.feature_extractor
__lowerCamelCase: List[Any] = False
def __call__( self : List[str] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : List[Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if "raw_speech" in kwargs:
warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" )
__lowerCamelCase: Dict = kwargs.pop("""raw_speech""" )
else:
__lowerCamelCase: int = kwargs.pop("""audio""" , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Dict = kwargs.pop("""sampling_rate""" , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: int = kwargs.pop("""text""" , SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
__lowerCamelCase: Dict = args[0]
__lowerCamelCase: Optional[Any] = args[1:]
if audio is None and text is None:
raise ValueError("""You need to specify either an `audio` or `text` input to process.""" )
if audio is not None:
__lowerCamelCase: List[str] = self.feature_extractor(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text is not None:
__lowerCamelCase: Any = self.tokenizer(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowerCamelCase: List[Any] = encodings["""input_ids"""]
return inputs
def SCREAMING_SNAKE_CASE__ ( self : List[str] , *SCREAMING_SNAKE_CASE_ : Optional[Any] , **SCREAMING_SNAKE_CASE_ : str ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , *SCREAMING_SNAKE_CASE_ : Any , **SCREAMING_SNAKE_CASE_ : List[Any] ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( self : Any ):
warnings.warn(
    """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
    """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
    """your audio inputs, or in a separate call).""" )
__lowerCamelCase: Any = True
__lowerCamelCase: Any = self.tokenizer
yield
__lowerCamelCase: Any = self.feature_extractor
__lowerCamelCase: Optional[Any] = False
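# A short usage sketch (the checkpoint name below is an assumption for illustration and is not
# referenced anywhere in this module); it shows a single combined audio + text call.
if __name__ == "__main__":
    import numpy as np

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    speech = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=speech, sampling_rate=16_000, text="a transcription", return_tensors="pt")
    print(sorted(inputs.keys()))  # feature-extractor outputs plus `labels` from the tokenizer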
| 189
| 1
|
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Linearly warms up the learning rate, then hands off to `decay_schedule_fn`."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a warmup phase followed by a polynomial decay schedule."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio, power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2,
            epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon,
            clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied only to parameters matched by the include/exclude patterns."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        # Creates an optimizer from its config with the WarmUp schedule as a custom object.
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several steps so they can be applied in a single optimizer update."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
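# A short usage sketch (the step counts and learning rate below are illustrative, not taken
# from this module); `create_optimizer` returns both the optimizer and the schedule so the
# learning rate can be tracked independently of the optimizer.
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(
        init_lr=3e-5, num_train_steps=1_000, num_warmup_steps=100, weight_decay_rate=0.01
    )
    print(float(lr_schedule(50)))  # halfway through warmup: 0.5 * 3e-5 = 1.5e-05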
| 71
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }
        config.update(**kwargs)
        return config
def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str]=0 , **lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""num_inference_steps""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_sample
SCREAMING_SNAKE_CASE : str = 0.1 * sample
SCREAMING_SNAKE_CASE : Any = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : Tuple = self.get_scheduler_config(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : Any = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = scheduler_class.from_pretrained(lowerCamelCase_ )
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
SCREAMING_SNAKE_CASE : Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = sample, sample
for t in range(lowerCamelCase_ , time_step + scheduler.config.solver_order + 1 ):
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[int] = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : str , lowerCamelCase_ : Union[str, Any]=0 , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = dict(self.forward_default_kwargs )
SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop("""num_inference_steps""" , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.dummy_sample
SCREAMING_SNAKE_CASE : Union[str, Any] = 0.1 * sample
SCREAMING_SNAKE_CASE : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
SCREAMING_SNAKE_CASE : int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler_class.from_pretrained(lowerCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
SCREAMING_SNAKE_CASE : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Dict = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : int=None , **lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
if scheduler is None:
SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : int = self.get_scheduler_config(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[Any] = self.get_scheduler_config(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = 10
SCREAMING_SNAKE_CASE : Any = self.dummy_model()
SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
return sample
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE : List[Any] = 50
SCREAMING_SNAKE_CASE : Dict = self.dummy_model()
SCREAMING_SNAKE_CASE : Any = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_574 ) < 1e-3
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE : int = self.full_loop(scheduler=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
SCREAMING_SNAKE_CASE : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Any = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.full_loop(scheduler=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , algorithm_type="""dpmsolver++""" , solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : int = self.full_loop(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers"
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCamelCase_ )
self.check_over_configs(lower_order_final=lowerCamelCase_ )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.check_over_configs(variance_type=lowerCamelCase_ )
self.check_over_configs(variance_type="""learned_range""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowerCamelCase_ , time_step=0 )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.full_loop()
SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.full_loop(use_karras_sigmas=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_248 ) < 1e-3
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.full_loop(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.1_453 ) < 1e-3
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.0_649 ) < 1e-3
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config(thresholding=lowerCamelCase_ , dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler_class(**lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = 10
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
assert sample.dtype == torch.floataa
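# A short illustrative sketch, separate from the tests above: schedulers that share a config
# format can be swapped via `from_config`, which is what the round-trip test above relies on.
if __name__ == "__main__":
    scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000, beta_schedule="linear")
    multistep = DPMSolverMultistepScheduler.from_config(scheduler.config)
    unipc = UniPCMultistepScheduler.from_config(scheduler.config)
    print(type(multistep).__name__, type(unipc).__name__)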
| 379
| 0
|
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
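# A worked example of the relation used above (values chosen for illustration): comparing
# 2**10 = 1024 and 3**7 = 2187 via 10*log10(2) ≈ 3.0103 vs 7*log10(3) ≈ 3.3401, i.e. the
# larger power is found without ever computing the powers themselves.
assert res(2, 10) < res(3, 7)
assert (2**10 < 3**7) == (res(2, 10) < res(3, 7))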
| 700
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
_lowerCAmelCase = 'examples/'
_lowerCAmelCase = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
_lowerCAmelCase = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
_lowerCAmelCase = 'README.md'
def UpperCamelCase ( _A , _A , _A ) -> Optional[int]:
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase : str = f.read()
lowercase , lowercase : Optional[int] = REPLACE_PATTERNS[pattern]
lowercase : Optional[Any] = replace.replace("""VERSION""" , _A )
lowercase : Tuple = re_pattern.sub(_A , _A )
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_A )
def UpperCamelCase ( _A ) -> List[str]:
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern="""examples""" )
def UpperCamelCase ( _A , _A=False ) -> Optional[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def UpperCamelCase ( ) -> Any:
lowercase : Any = """🤗 Transformers currently provides the following architectures"""
lowercase : Dict = """1. Want to contribute a new model?"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase : str = f.readlines()
# Find the start of the list.
lowercase : int = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowercase : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
lowercase : List[str] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_A )
def UpperCamelCase ( ) -> Tuple:
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
lowercase : List[str] = f.read()
lowercase : List[Any] = REPLACE_PATTERNS["""init"""][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def UpperCamelCase ( _A=False ) -> List[Any]:
lowercase : str = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
lowercase : Union[str, Any] = default_version.base_version
elif patch:
lowercase : Optional[Any] = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
lowercase : List[str] = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
lowercase : Optional[int] = input(F"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
lowercase : Optional[int] = default_version
print(F"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def UpperCamelCase ( ) -> Tuple:
lowercase : Any = get_version()
lowercase : Optional[int] = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
lowercase : Dict = current_version.base_version
# Check with the user we got that right.
lowercase : List[Any] = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
lowercase : int = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(_A )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_lowerCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
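# A small worked illustration of the `init` pattern above (sample strings invented for this example):
#     re_pattern, replace = REPLACE_PATTERNS["init"]
#     re_pattern.sub(replace.replace("VERSION", "4.30.0"), '__version__ = "4.30.0.dev0"\n')
# yields '__version__ = "4.30.0"\n', which is the per-file substitution that
# update_version_in_file performs.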
| 348
| 0
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Any ):
__lowercase : Dict = tempfile.mkdtemp()
__lowercase : List[str] = BlipImageProcessor()
__lowercase : Union[str, Any] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
__lowercase : Optional[int] = BlipProcessor(_snake_case , _snake_case )
processor.save_pretrained(self.tmpdirname )
def snake_case_ ( self : List[str] , **_snake_case : str ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_snake_case ).tokenizer
def snake_case_ ( self : int , **_snake_case : int ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_snake_case ).image_processor
def snake_case_ ( self : Optional[Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : Dict ):
__lowercase : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__lowercase : List[str] = [Image.fromarray(np.moveaxis(_snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self : List[str] ):
__lowercase : int = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase : List[str] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowercase : int = self.get_image_processor(do_normalize=_snake_case , padding_value=1.0 )
__lowercase : Dict = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def snake_case_ ( self : Dict ):
__lowercase : Optional[Any] = self.get_image_processor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Union[str, Any] = BlipProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowercase : Any = self.prepare_image_inputs()
__lowercase : List[Any] = image_processor(_snake_case , return_tensors='''np''' )
__lowercase : List[Any] = processor(images=_snake_case , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case_ ( self : Union[str, Any] ):
__lowercase : List[str] = self.get_image_processor()
__lowercase : Optional[int] = self.get_tokenizer()
__lowercase : Any = BlipProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowercase : Dict = "lower newer"
__lowercase : str = processor(text=_snake_case )
__lowercase : Any = tokenizer(_snake_case , return_token_type_ids=_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Tuple = self.get_image_processor()
__lowercase : List[str] = self.get_tokenizer()
__lowercase : Dict = BlipProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowercase : Union[str, Any] = "lower newer"
__lowercase : Optional[Any] = self.prepare_image_inputs()
__lowercase : Optional[Any] = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def snake_case_ ( self : Union[str, Any] ):
__lowercase : Tuple = self.get_image_processor()
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : Dict = BlipProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowercase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase : Any = processor.batch_decode(_snake_case )
__lowercase : Union[str, Any] = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self : int ):
__lowercase : Dict = self.get_image_processor()
__lowercase : Dict = self.get_tokenizer()
__lowercase : Dict = BlipProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowercase : Tuple = "lower newer"
__lowercase : Optional[int] = self.prepare_image_inputs()
__lowercase : Union[str, Any] = processor(text=_snake_case , images=_snake_case )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
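# A brief usage sketch mirroring the fixtures above (the tiny tokenizer checkpoint is the same
# testing checkpoint used in setUp; the image is random data for illustration only).
if __name__ == "__main__":
    from PIL import Image
    from transformers import BertTokenizer, BlipImageProcessor, BlipProcessor

    processor = BlipProcessor(
        tokenizer=BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
        image_processor=BlipImageProcessor(),
    )
    image = Image.fromarray(np.random.randint(0, 255, (30, 40, 3), dtype=np.uint8))
    batch = processor(text="lower newer", images=image)
    print(sorted(batch.keys()))  # attention_mask, input_ids, pixel_values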
| 509
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )
def run_command_factory(args):
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config,
        tokenizer=args.tokenizer, device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format, output_path=args.output, input_path=args.input,
        column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader
@staticmethod
def _snake_case ( UpperCAmelCase : ArgumentParser):
SCREAMING_SNAKE_CASE_ :Any = parser.add_parser("run" , help="Run a pipeline through the CLI")
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run")
run_parser.add_argument("--input" , type=UpperCAmelCase , help="Path to the file to use for inference")
run_parser.add_argument("--output" , type=UpperCAmelCase , help="Path to the file that will be used post to write results.")
run_parser.add_argument("--model" , type=UpperCAmelCase , help="Name or path to the model to instantiate.")
run_parser.add_argument("--config" , type=UpperCAmelCase , help="Name or path to the model's config to instantiate.")
run_parser.add_argument(
"--tokenizer" , type=UpperCAmelCase , help="Name of the tokenizer to use. (default: same as the model name)")
run_parser.add_argument(
"--column" , type=UpperCAmelCase , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=UpperCAmelCase , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=UpperCAmelCase , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file.")
run_parser.set_defaults(func=UpperCAmelCase)
    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
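# A short illustrative sketch of what the `run` subcommand wires together (the task is chosen
# for illustration and downloads that task's default checkpoint): it builds a `pipeline` and
# feeds records through it, much as `RunCommand.run` does with a file-backed reader.
if __name__ == "__main__":
    nlp = pipeline(task="sentiment-analysis")
    print(nlp("This CLI command is easy to use."))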
| 631
| 0
|
from math import pi, sqrt
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        if num:
            print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
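# Two worked values for the recursion above (illustrative checks, safe to run on import):
# gamma(5) = 4! = 24, and gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234.
assert gamma(5) == 24.0
assert abs(gamma(3.5) - 1.875 * sqrt(pi)) < 1e-12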
| 405
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__lowerCAmelCase =logging.getLogger(__name__)
def __UpperCamelCase ( _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=16 , _lowerCAmelCase = 10 , _lowerCAmelCase = 2 ):
"""simple docstring"""
def get_dataset(_lowerCAmelCase ):
UpperCAmelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_lowerCAmelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
UpperCAmelCase = get_dataset(_lowerCAmelCase )
UpperCAmelCase = get_dataset(_lowerCAmelCase )
UpperCAmelCase = DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , batch_size=_lowerCAmelCase , num_workers=4 )
UpperCAmelCase = DataLoader(_lowerCAmelCase , shuffle=_lowerCAmelCase , batch_size=_lowerCAmelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
"""simple docstring"""
UpperCAmelCase = []
for epoch in range(_lowerCAmelCase ):
# Train quickly
model.train()
for batch in dataloader:
UpperCAmelCase , UpperCAmelCase = batch
UpperCAmelCase = model(_lowerCAmelCase )
UpperCAmelCase = torch.nn.functional.mse_loss(_lowerCAmelCase , _lowerCAmelCase )
accelerator.backward(_lowerCAmelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __magic_name__ ( nn.Module):
def __init__( self : Any ):
super().__init__()
UpperCAmelCase = nn.Parameter(torch.randn(1 ) )
UpperCAmelCase = nn.Parameter(torch.randn(1 ) )
def _UpperCAmelCase ( self : str ,__SCREAMING_SNAKE_CASE : Union[str, Any] ):
return x * self.a + self.b
class __magic_name__ ( unittest.TestCase):
def _UpperCAmelCase ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(total_limit=1 ,project_dir=__SCREAMING_SNAKE_CASE ,automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
# Train baseline
UpperCAmelCase = Accelerator(project_config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def _UpperCAmelCase ( self : str ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
# Train baseline
UpperCAmelCase = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# Save initial
UpperCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE ,"initial" )
accelerator.save_state(__SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
UpperCAmelCase = train(3 ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = Accelerator()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
accelerator.load_state(__SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = train(2 ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# Save everything
UpperCAmelCase = os.path.join(__SCREAMING_SNAKE_CASE ,"checkpoint" )
accelerator.save_state(__SCREAMING_SNAKE_CASE )
# Load everything back in and make sure all states work
accelerator.load_state(__SCREAMING_SNAKE_CASE )
test_rands += train(1 ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
# Train baseline
UpperCAmelCase = Accelerator(project_dir=__SCREAMING_SNAKE_CASE ,project_config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
UpperCAmelCase = train(3 ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
# Train partially
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = Accelerator(project_dir=__SCREAMING_SNAKE_CASE ,project_config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_0" ) )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = train(2 ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_1" ) )
test_rands += train(1 ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
((UpperCAmelCase) , (UpperCAmelCase)) = model.a.item(), model.b.item()
UpperCAmelCase = optimizer.state_dict()
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[int] ):
UpperCAmelCase = torch.tensor([1, 2, 3] )
UpperCAmelCase = torch.tensor([2, 3, 4] )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(net.parameters() )
UpperCAmelCase = Accelerator()
with self.assertRaises(__SCREAMING_SNAKE_CASE ) as ve:
accelerator.register_for_checkpointing(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = str(ve.exception )
self.assertTrue("Item at index 0" in message )
self.assertTrue("Item at index 1" in message )
self.assertFalse("Item at index 2" in message )
self.assertFalse("Item at index 3" in message )
def _UpperCAmelCase ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
UpperCAmelCase = torch.optim.lr_scheduler.StepLR(__SCREAMING_SNAKE_CASE ,step_size=1 ,gamma=0.99 )
UpperCAmelCase , UpperCAmelCase = dummy_dataloaders()
UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE )
# Train baseline
UpperCAmelCase = Accelerator(project_dir=__SCREAMING_SNAKE_CASE ,project_config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# Save initial
accelerator.save_state()
UpperCAmelCase = scheduler.state_dict()
train(3 ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
self.assertNotEqual(__SCREAMING_SNAKE_CASE ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(__SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_0" ) )
self.assertEqual(__SCREAMING_SNAKE_CASE ,scheduler.state_dict() )
def _UpperCAmelCase ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2 )
UpperCAmelCase = DummyModel()
UpperCAmelCase = ProjectConfiguration(automatic_checkpoint_naming=__SCREAMING_SNAKE_CASE ,total_limit=2 )
# Train baseline
UpperCAmelCase = Accelerator(project_dir=__SCREAMING_SNAKE_CASE ,project_config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = accelerator.prepare(__SCREAMING_SNAKE_CASE )
# Save 3 states:
for _ in range(1_1 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_0" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_9" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE ,"checkpoints" ,"checkpoint_10" ) ) )
@require_cuda
def _UpperCAmelCase ( self : Optional[Any] ):
UpperCAmelCase = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__SCREAMING_SNAKE_CASE ,env=os.environ.copy() )
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
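# A minimal sketch of the save/load cycle exercised above (paths and limits are illustrative):
#
#     project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
#     accelerator = Accelerator(project_dir="ckpts", project_config=project_config)
#     model, optimizer = accelerator.prepare(model, optimizer)
#     accelerator.save_state()                                   # -> ckpts/checkpoints/checkpoint_0
#     accelerator.load_state("ckpts/checkpoints/checkpoint_0")   # restores model/optimizer/RNG state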
| 405
| 1
|