| code (stringlengths 81–54k) | code_codestyle (int64 0–721) | style_context (stringlengths 91–41.9k) | style_context_codestyle (int64 0–699) | label (int64 0–1) |
|---|---|---|---|---|
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
---
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask,
        indicating which patches to mask."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
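# Example invocation (a sketch; the flags mirror the ones documented for the
# image-pretraining example, adjust paths and model type to taste):
#
#     python run_mim.py \
#         --model_type vit \
#         --output_dir ./outputs/ \
#         --overwrite_output_dir \
#         --remove_unused_columns False \
#         --label_names bool_masked_pos \
#         --do_train --do_eval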
---
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
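# Minimal standalone sketch of what these tests exercise (same dummy geometry as
# Unet2DModelTests above; not part of the original test file):
#
#     model = UNet2DModel(
#         block_out_channels=(32, 64),
#         down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#         up_block_types=("AttnUpBlock2D", "UpBlock2D"),
#         in_channels=3,
#         out_channels=3,
#         layers_per_block=2,
#         sample_size=32,
#         attention_head_dim=3,
#     )
#     sample = torch.randn(1, 3, 32, 32)
#     out = model(sample, timestep=torch.tensor([10])).sample  # same (1, 3, 32, 32) shape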
---
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LeViT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
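# Typical use of the processor above (a sketch; "facebook/levit-128S" is a public
# LeViT checkpoint, and "cat.png" is a placeholder input):
#
#     from PIL import Image
#     processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#     inputs = processor(Image.open("cat.png").convert("RGB"), return_tensors="pt")
#     inputs["pixel_values"].shape  # torch.Size([1, 3, 224, 224])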
---
import math
import unittest
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and non-negative"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


class Test(unittest.TestCase):
    def test_primes(self):
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(11))
        self.assertTrue(is_prime(13))
        self.assertTrue(is_prime(17))
        self.assertTrue(is_prime(19))
        self.assertTrue(is_prime(23))
        self.assertTrue(is_prime(29))

    def test_not_primes(self):
        with self.assertRaises(AssertionError):
            is_prime(-19)
        self.assertFalse(
            is_prime(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            is_prime(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
if __name__ == "__main__":
unittest.main()
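# Example: the 6k +/- 1 loop above only needs to test divisors 5, 7, 11, 13, ...
# because every prime greater than 3 is congruent to 1 or 5 modulo 6. A quick
# doctest-style check:
#
#     >>> [n for n in range(2, 30) if is_prime(n)]
#     [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]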
---
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
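# The _LazyModule assignment above makes this package cheap to import: submodules are
# only loaded when an attribute is first accessed. A minimal sketch of the observable
# behavior (assuming this package layout):
#
#     from transformers.models import trocr
#     trocr.TrOCRConfig        # first access triggers import of configuration_trocr
#     trocr.TrOCRForCausalLM   # triggers import of modeling_trocr (requires torch)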
---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_m2m_100'] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
---
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
    parser.add_argument('--vocab_size', default=10224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
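# Example invocation (a sketch; the script name, fairseq checkpoint, and dict paths
# are placeholders you must supply yourself):
#
#     python convert_wav2vec2_seq2seq_checkpoint.py \
#         --checkpoint_path /path/to/checkpoint_best.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./wav2vec2-s2t-converted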
---
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup

headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Searches Google using the provided query term and downloads the images to a folder."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }

    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )

    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)

    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0

    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )

    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
except IndexError:
print('Please provide a search term.')
raise
---
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
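# Example of wiring the pieces above together (a sketch; the values mirror the
# defaults rather than any released checkpoint):
#
#     config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
#     config.esmfold_config.trunk.num_blocks   # 4; nested dicts are promoted to dataclasses
#     config.to_dict()["esmfold_config"]["trunk"]["structure_module"]["sequence_dim"]  # 384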
---
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was lost in extraction; `ImageProcessor` is a placeholder.
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, crop_size: Dict[str, int] = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
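# Hedged usage sketch: `ImageProcessor` is the placeholder name introduced above
# (the original class name was lost in extraction). Preprocessing a PIL image:
#
#     from PIL import Image
#     processor = ImageProcessor()
#     batch = processor.preprocess(Image.open("cat.png"), return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224) with the defaults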
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
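# Hedged usage note: this module normally runs via the `accelerate env` CLI
# subcommand; programmatically, the same flow is
#     args = env_command_parser().parse_args([])
#     env_command(args)
# which prints the environment report and returns the `info` dict.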
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None
    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname
    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
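# Hedged behavior note on retrieve() above (single-node assumption): only the
# main worker (rank 0) queries the index; other ranks contribute their query
# vectors through dist.gather and receive their own slice of doc ids and
# embeddings back through dist.scatter, so each worker ends up with results
# for exactly its n_queries questions.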
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}")

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
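# Hedged usage note: this suite is meant to be collected by pytest from a
# transformers checkout, e.g. `pytest -k deit` (the exact test-file path is an
# assumption); the @slow-marked integration tests additionally require
# RUN_SLOW=1 and network access to download the facebook/deit checkpoints.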
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
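    # Hedged usage sketch: `length` defaults to len(list_data) when omitted,
    # and the list is sorted in place as well as returned.
    print(bubble_sort([4, 1, 3]))  # -> [1, 3, 4]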
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
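# Hedged note on the guard pattern above: when torch/transformers are missing,
# the wildcard import pulls in dummy placeholder objects instead, so
# `from <this package> import StableDiffusionControlNetPipeline` still resolves
# and only fails (with an informative error) when the class is actually used.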
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
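# Hedged usage sketch (checkpoint download assumed to be available):
#
#     tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#     enc = tok("hello world")
#     print(enc["input_ids"])  # [CLS] ... [SEP] ids, with token_type_ids of 0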
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
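# Hedged usage sketch: constructing a config and reading the aliased attribute
# (`attribute_map` makes `max_position_embeddings` resolve to `context_length`).
#
#     config = RwkvConfig(context_length=2048)
#     print(config.max_position_embeddings)  # 2048, via attribute_map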
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)), self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
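# Hedged usage sketch (checkpoint download assumed): sentinel tokens are the
# <extra_id_*> specials filtered out by get_sentinel_tokens() above.
#
#     tok = T5TokenizerFast.from_pretrained("t5-small")
#     print(tok.get_sentinel_tokens()[:2])  # e.g. ['<extra_id_0>', '<extra_id_1>']
#     print(tok("translate: hi")["input_ids"][-1] == tok.eos_token_id)  # True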
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ , orient=UpperCamelCase__ ).write()
buffer.seek(0 )
lowercase_ = load_json(UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Dict ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
lowercase_ = load_json_function(UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert isinstance(exported_content[0] , UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ , orient=UpperCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
lowercase_ = load_json(UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase__ ) == 10
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
with pytest.raises(UpperCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = tmp_path_factory.mktemp("""data""" ) / F'''test.json.{extension}'''
lowercase_ = str(shared_datadir / F'''test_file.json.{extension}''' )
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , compression=UpperCamelCase__ ).write()
with fsspec.open(UpperCamelCase__ , """rb""" , compression="""infer""" ) as f:
lowercase_ = f.read()
with fsspec.open(UpperCamelCase__ , """rb""" , compression="""infer""" ) as f:
lowercase_ = f.read()
assert exported_content == original_content
| 650
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
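# Fast CPU tests for the DiffEdit pipeline, built from tiny dummy components
# (a small UNet, VAE and CLIP text encoder) so the tests stay fast.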
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase_ = raw_image
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 650
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = 'roformer'
def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[Any]=50_000 , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Tuple=768 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : List[str]=12 , UpperCamelCase__ : List[Any]=3_072 , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : int=1_536 , UpperCamelCase__ : str=2 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : List[Any]=1e-12 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[Any]=True , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ = vocab_size
lowercase_ = hidden_size if embedding_size is None else embedding_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = hidden_act
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = rotary_value
lowercase_ = use_cache
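# ONNX export configuration: declares which input axes are dynamic
# (batch/sequence, plus a choice axis for multiple-choice heads).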
class UpperCamelCase__ ( __magic_name__ ):
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if self.task == "multiple-choice":
lowercase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ = {0: """batch""", 1: """sequence"""}
lowercase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 650
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : str = ['pixel_values']
def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_pad
lowercase_ = pad_size
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
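        # Pad on the bottom/right so that height and width become multiples of
        # `size`; note that a dimension already at a multiple still gains a full
        # extra `size` of symmetric padding.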
lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
lowercase_ = (old_height // size + 1) * size - old_height
lowercase_ = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_pad if do_pad is not None else self.do_pad
lowercase_ = pad_size if pad_size is not None else self.pad_size
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_pad:
lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 650
| 1
|
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise TypeError("""Input value must be an 'int' type""" )
lowercase_ = 0
while number:
position += 1
number >>= 1
return position
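# Illustrative values: 1 -> 1, 8 (0b1000) -> 4, 0 -> 0; for non-negative
# integers this matches int.bit_length(). Negative inputs would loop forever,
# since right-shifting a negative int never reaches zero.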
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 1
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def UpperCAmelCase_ ( ):
lowercase_ = argparse.ArgumentParser()
parser.add_argument("""--model_ckpt""" , type=UpperCAmelCase__ , default="""microsoft/unixcoder-base-nine""" )
parser.add_argument("""--num_epochs""" , type=UpperCAmelCase__ , default=5 )
parser.add_argument("""--batch_size""" , type=UpperCAmelCase__ , default=6 )
parser.add_argument("""--gradient_accumulation_steps""" , type=UpperCAmelCase__ , default=1 )
parser.add_argument("""--freeze""" , type=UpperCAmelCase__ , default=UpperCAmelCase__ )
parser.add_argument("""--learning_rate""" , type=UpperCAmelCase__ , default=5e-4 )
parser.add_argument("""--seed""" , type=UpperCAmelCase__ , default=0 )
parser.add_argument("""--lr_scheduler_type""" , type=UpperCAmelCase__ , default="""cosine""" )
parser.add_argument("""--num_warmup_steps""" , type=UpperCAmelCase__ , default=1_0 )
parser.add_argument("""--weight_decay""" , type=UpperCAmelCase__ , default=0.01 )
parser.add_argument("""--output_dir""" , type=UpperCAmelCase__ , default="""./results""" )
return parser.parse_args()
a = load('accuracy')
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ , lowercase_ = eval_pred
lowercase_ = np.argmax(UpperCAmelCase__ , axis=1 )
return metric.compute(predictions=UpperCAmelCase__ , references=UpperCAmelCase__ )
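# Callback that re-runs evaluation on the training set whenever the trainer
# evaluates, so train accuracy is logged alongside validation accuracy.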
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : Tuple , UpperCamelCase__ : int ):
'''simple docstring'''
super().__init__()
lowercase_ = trainer
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , **UpperCamelCase__ : str ):
'''simple docstring'''
if control.should_evaluate:
lowercase_ = deepcopy(UpperCamelCase__ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="""train""" )
return control_copy
def UpperCAmelCase_ ( ):
lowercase_ = get_args()
set_seed(args.seed )
lowercase_ = load_dataset("""codeparrot/codecomplex""" , split="""train""" )
lowercase_ = dataset.train_test_split(test_size=0.2 )
lowercase_ = train_test["""test"""].train_test_split(test_size=0.5 )
lowercase_ = DatasetDict(
{
"""train""": train_test["""train"""],
"""test""": test_validation["""train"""],
"""valid""": test_validation["""test"""],
} )
print("""Loading tokenizer and model""" )
lowercase_ = AutoTokenizer.from_pretrained(args.model_ckpt )
lowercase_ = tokenizer.eos_token
lowercase_ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
lowercase_ = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
lowercase_ = False
lowercase_ = ClassLabel(num_classes=7 , names=list(set(train_test_validation["""train"""]["""complexity"""] ) ) )
def tokenize(UpperCAmelCase__ ):
lowercase_ = tokenizer(example["""src"""] , truncation=UpperCAmelCase__ , max_length=1_0_2_4 )
        lowercase_ = labels.str2int(example["""complexity"""] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
lowercase_ = train_test_validation.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , remove_columns=train_test_validation["""train"""].column_names , )
lowercase_ = DataCollatorWithPadding(tokenizer=UpperCAmelCase__ )
lowercase_ = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="""epoch""" , save_strategy="""epoch""" , logging_strategy="""epoch""" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="""accuracy""" , run_name="""complexity-java""" , report_to="""wandb""" , )
lowercase_ = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=tokenized_datasets["""train"""] , eval_dataset=tokenized_datasets["""valid"""] , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , compute_metrics=UpperCAmelCase__ , )
print("""Training...""" )
trainer.add_callback(CustomCallback(UpperCAmelCase__ ) )
trainer.train()
if __name__ == "__main__":
main()
| 650
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ):
@register_to_config
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : bool = False , ):
'''simple docstring'''
super().__init__()
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = False
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
lowercase_ = TaConfig(
vocab_size=UpperCamelCase__ , d_model=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_kv=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , feed_forward_proj=UpperCamelCase__ , is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , )
lowercase_ = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
lowercase_ = TaBlock(UpperCamelCase__ )
self.encoders.append(UpperCamelCase__ )
lowercase_ = TaLayerNorm(UpperCamelCase__ )
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
'''simple docstring'''
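        # Token embeddings plus learned positional encodings, followed by a
        # stack of T5-style blocks sharing one extended (inverted) attention mask.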
lowercase_ = self.token_embedder(UpperCamelCase__ )
lowercase_ = encoder_input_tokens.shape[1]
lowercase_ = torch.arange(UpperCamelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase__ )
lowercase_ = self.dropout_pre(UpperCamelCase__ )
# inverted the attention mask
lowercase_ = encoder_input_tokens.size()
lowercase_ = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ )
for lyr in self.encoders:
lowercase_ = lyr(UpperCamelCase__ , UpperCamelCase__ )[0]
lowercase_ = self.layer_norm(UpperCamelCase__ )
return self.dropout_post(UpperCamelCase__ ), encoder_inputs_mask
| 650
| 1
|
def UpperCAmelCase_ ( UpperCAmelCase__ = 1_0 ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) or n < 0:
raise ValueError("""Invalid input""" )
lowercase_ = 1_0**n
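    # Only the last n digits of 28433 * 2**7830457 + 1 are needed, so the
    # power is reduced modulo 10**n (Project Euler problem 97 uses n = 10).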
    lowercase_ = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , modulus )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(1_0) = }''')
| 650
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a = TypeVar('T')
class UpperCamelCase__ ( Generic[T] ):
__SCREAMING_SNAKE_CASE : deque[T] # Cache store of keys
__SCREAMING_SNAKE_CASE : set[T] # References of the keys in cache
__SCREAMING_SNAKE_CASE : int = 10 # Maximum capacity of cache
def __init__( self : str , UpperCamelCase__ : int ):
'''simple docstring'''
lowercase_ = deque()
lowercase_ = set()
if not n:
lowercase_ = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
lowercase_ = n
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : T ):
'''simple docstring'''
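        # On a miss at full capacity, evict the least recently used key from the
        # right end of the deque; on a hit, move the key back to the front.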
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
lowercase_ = self.dq_store.pop()
self.key_reference.remove(UpperCamelCase__ )
else:
self.dq_store.remove(UpperCamelCase__ )
self.dq_store.appendleft(UpperCamelCase__ )
self.key_reference.add(UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
for k in self.dq_store:
print(UpperCamelCase__ )
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
a = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 650
| 1
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
a = logging.get_logger(__name__)
@dataclass
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : int = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self : List[Any] , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
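        # Translate legacy `no_*` keyword arguments into their positive
        # counterparts (e.g. `no_cuda=True` becomes `cuda=False`) before
        # delegating to the parent dataclass.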
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowercase_ = deprecated_arg[3:]
setattr(self , UpperCamelCase__ , not kwargs.pop(UpperCamelCase__ ) )
logger.warning(
                    F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
lowercase_ = kwargs.pop("""torchscript""" , self.torchscript )
lowercase_ = kwargs.pop("""torch_xla_tpu_print_metrics""" , self.torch_xla_tpu_print_metrics )
lowercase_ = kwargs.pop("""fp16_opt_level""" , self.fpaa_opt_level )
super().__init__(**UpperCamelCase__ )
__SCREAMING_SNAKE_CASE : bool = field(default=__magic_name__ , metadata={'help': 'Trace the models using torchscript'} )
__SCREAMING_SNAKE_CASE : bool = field(default=__magic_name__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
__SCREAMING_SNAKE_CASE : str = field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
logger.info("""PyTorch: setting up devices""" )
if not self.cuda:
lowercase_ = torch.device("""cpu""" )
lowercase_ = 0
elif is_torch_tpu_available():
lowercase_ = xm.xla_device()
lowercase_ = 0
else:
lowercase_ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
lowercase_ = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
return self._setup_devices[0]
@property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
requires_backends(self , ["""torch"""] )
return self._setup_devices[1]
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return self.n_gpu > 0
| 650
|
def UpperCAmelCase_ ( UpperCAmelCase__ ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 1
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
# TODO Update this
a = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Tuple = 'esm'
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : Dict=3_072 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[int]=1_026 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Dict=1e-12 , UpperCamelCase__ : List[str]="absolute" , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = position_embedding_type
lowercase_ = use_cache
lowercase_ = emb_layer_norm_before
lowercase_ = token_dropout
lowercase_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
lowercase_ = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = EsmFoldConfig(**UpperCamelCase__ )
lowercase_ = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
lowercase_ = get_default_vocab_list()
else:
lowercase_ = vocab_list
else:
lowercase_ = None
lowercase_ = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , UpperCamelCase__ ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
lowercase_ = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : "TrunkConfig" = None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if self.trunk is None:
lowercase_ = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
lowercase_ = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = asdict(self )
lowercase_ = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : int = 48
__SCREAMING_SNAKE_CASE : int = 1024
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Optional[int] = 128
__SCREAMING_SNAKE_CASE : "StructureModuleConfig" = None
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
if self.structure_module is None:
lowercase_ = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
lowercase_ = StructureModuleConfig(**self.structure_module )
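        # Sanity checks: each state dimension must factor exactly into an
        # integer number of attention heads of the configured head width.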
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowercase_ = self.sequence_state_dim // self.sequence_head_width
lowercase_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = asdict(self )
lowercase_ = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : int = 384
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 16
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 12
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 8
__SCREAMING_SNAKE_CASE : float = 0.1
__SCREAMING_SNAKE_CASE : int = 8
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : int = 7
__SCREAMING_SNAKE_CASE : int = 10
__SCREAMING_SNAKE_CASE : float = 1e-8
__SCREAMING_SNAKE_CASE : float = 1e5
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return asdict(self )
def UpperCAmelCase_ ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 650
|
def UpperCAmelCase_ ( UpperCAmelCase__=2_8_1_2_3 ):
lowercase_ = [1] * (limit + 1)
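    # Sieve of proper-divisor sums: every product k * i receives both factors,
    # and the initial 1s account for 1 dividing every number.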
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
lowercase_ = set()
lowercase_ = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
            abundants.add(n )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
if __name__ == "__main__":
print(solution())
| 650
| 1
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
from .dual_transformer_ad import DualTransformeraDModel
from .modeling_utils import ModelMixin
from .prior_transformer import PriorTransformer
from .ta_film_transformer import TaFilmDecoder
from .transformer_ad import TransformeraDModel
    from .unet_ad import UNetaDModel
    from .unet_ad_condition import UNetaDConditionModel
from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
from .unet_ad_condition_flax import FlaxUNetaDConditionModel
from .vae_flax import FlaxAutoencoderKL
| 650
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return OpenLlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , use_stable_embedding=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = OpenLlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        config.add_cross_attention = True
        model = OpenLlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        '''simple docstring'''
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["""hidden_states"""][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': OpenLlamaModel,
            'text-classification': OpenLlamaForSequenceClassification,
            'text-generation': OpenLlamaForCausalLM,
            'zero-shot': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = OpenLlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenLlamaConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_open_llama_sequence_classification_model( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_single_label( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """single_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_open_llama_sequence_classification_model_for_multi_label( self ):
        '''simple docstring'''
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = OpenLlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base( self ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling( self , scaling_type ):
        '''simple docstring'''
        config , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = OpenLlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
| 650
| 1
|
a = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
a = [{'type': 'code', 'content': INSTALL_CONTENT}]
a = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 650
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a = False
logger = logging.get_logger(__name__)
DEFAULT_FONT_PATH = 'ybelkada/fonts'
def _check_torch_version( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def torch_extract_patches( image_tensor , patch_height , patch_width ):
    requires_backends(torch_extract_patches , ["""torch"""] )
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0 )
    patches = torch.nn.functional.unfold(image_tensor , (patch_height, patch_width) , stride=(patch_height, patch_width) )
    patches = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , patch_height , patch_width , -1 )
    patches = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
        image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
    return patches.unsqueeze(0 )
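# Shape sketch (derived from the code above, not from external docs): a
# (3, 32, 64) image tensor with 16x16 patches is unfolded into 2 rows and 4
# columns of patches, giving a final tensor of shape (1, 2, 4, 768), where
# 768 = 3 * 16 * 16 flattened channel-pixels per patch.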
def render_text( text , text_size = 3_6 , text_color = "black" , background_color = "white" , left_padding = 5 , right_padding = 5 , top_padding = 5 , bottom_padding = 5 , font_bytes = None , font_path = None , ):
    requires_backends(render_text , """vision""" )
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=8_0 )
    lines = wrapper.wrap(text=text )
    wrapped_text = """\n""".join(lines )
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes )
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH , """Arial.TTF""" )
    font = ImageFont.truetype(font , encoding="""UTF-8""" , size=text_size )
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , background_color ) )
    _ , _ , text_width , text_height = temp_draw.textbbox((0, 0) , wrapped_text , font )
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("""RGB""" , (image_width, image_height) , background_color )
    draw = ImageDraw.Draw(image )
    draw.text(xy=(left_padding, top_padding) , text=wrapped_text , fill=text_color , font=font )
    return image
def render_header( image , header , **kwargs ):
    requires_backends(render_header , """vision""" )
    # Convert to PIL image if necessary
    image = to_pil_image(image )
    header_image = render_text(header , **kwargs )
    new_width = max(header_image.width , image.width )
    new_height = int(image.height * (new_width / image.width) )
    new_header_height = int(header_image.height * (new_width / header_image.width) )
    new_image = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
    new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
    new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image )
    if infer_channel_dimension_format(new_image ) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image , ChannelDimension.LAST )
    return new_image
class UpperCamelCase__ ( BaseImageProcessor ):
    model_input_names = ['flattened_patches']
    def __init__( self , do_convert_rgb : bool = True , do_normalize : bool = True , patch_size : Dict[str, int] = None , max_patches : int = 2_048 , is_vqa : bool = False , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches( self , image : np.ndarray , max_patches : int , patch_size : dict , **kwargs ):
        '''simple docstring'''
        requires_backends(self.extract_flattened_patches , """torch""" )
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image , ChannelDimension.FIRST )
        image = torch.from_numpy(image )
        patch_height , patch_width = patch_size["""height"""], patch_size["""width"""]
        image_height , image_width = get_image_size(image )
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height ) , max_patches ) , 1 )
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width ) , max_patches ) , 1 )
        resized_height = max(num_feasible_rows * patch_height , 1 )
        resized_width = max(num_feasible_cols * patch_width , 1 )
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=False , antialias=True , ).squeeze(0 )
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image , patch_height , patch_width )
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth] )
        # [rows * columns, 1]
        row_ids = torch.arange(rows ).reshape([rows, 1] ).repeat(1 , columns ).reshape([rows * columns, 1] )
        col_ids = torch.arange(columns ).reshape([1, columns] ).repeat(rows , 1 ).reshape([rows * columns, 1] )
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32 )
        col_ids = col_ids.to(torch.float32 )
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches] , -1 )
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result , [0, 0, 0, max_patches - (rows * columns)] ).float()
        result = to_numpy_array(result )
        return result
    def normalize( self , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        '''simple docstring'''
        if image.dtype == np.uint8:
            image = image.astype(np.float32 )
        # take mean across the whole `image`
        mean = np.mean(image )
        std = np.std(image )
        adjusted_stddev = max(std , 1.0 / math.sqrt(np.prod(image.shape ) ) )
        return normalize(image , mean=mean , std=adjusted_stddev , **kwargs )
    def preprocess( self , images : ImageInput , header_text : Optional[str] = None , do_convert_rgb : bool = None , do_normalize : Optional[bool] = None , max_patches : Optional[int] = None , patch_size : Optional[Dict[str, int]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa
        if kwargs.get("""data_format""" , None ) is not None:
            raise ValueError("""data_format is not an accepted input as the outputs are """ )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image ) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if is_vqa:
            if header_text is None:
                raise ValueError("""A header text must be provided for VQA models.""" )
            font_bytes = kwargs.pop("""font_bytes""" , None )
            font_path = kwargs.pop("""font_path""" , None )
            if isinstance(header_text , str ):
                header_text = [header_text] * len(images )
            images = [
                render_header(image , header_text[i] , font_bytes=font_bytes , font_path=font_path )
                for i, image in enumerate(images )
            ]
        if do_normalize:
            images = [self.normalize(image=image ) for image in images]
        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image , max_patches=max_patches , patch_size=patch_size )
            for image in images
        ]
        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
        encoded_outputs = BatchFeature(
            data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=return_tensors )
        return encoded_outputs
| 650
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_upernet': ['UperNetConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_upernet'] = [
        'UperNetForSemanticSegmentation',
        'UperNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 650
|
import cva
import numpy as np
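# Harris corner response for each window: R = det(M) - k * trace(M)^2, where
# M is the 2x2 structure tensor built from the summed gradient products
# (Ixx, Iyy, Ixy) over the window. Pixels with a large positive R are corners.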
class HarrisCorner:
    def __init__( self , k , window_size ):
        '''simple docstring'''
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("""invalid k value""" )
    def __str__( self ):
        '''simple docstring'''
        return str(self.k )
    def detect( self , img_path ):
        '''simple docstring'''
        img = cva.imread(img_path , 0 )
        h , w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cva.cvtColor(color_img , cva.COLOR_GRAY2RGB )
        dy , dx = np.gradient(img )
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset , h - offset ):
            for x in range(offset , w - offset ):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r] )
                    color_img.itemset((y, x, 0) , 0 )
                    color_img.itemset((y, x, 1) , 0 )
                    color_img.itemset((y, x, 2) , 255 )
        return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
| 650
| 1
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=1_0_2_4 , max_target_length=1_0_2_4 , consider_target=False , **kwargs ):
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="""train""" , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=5_1_2 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["""input_ids"""].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["""labels"""].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = SeqaSeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="""val""" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
fire.Fire(save_len_file)
| 650
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
a = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
a = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
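# Converts a batch of image tensors from the [-1, 1] range used by diffusion
# models back to [0, 1], moves channels last, and hands off to numpy_to_pil
# below to produce PIL images.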
def UpperCAmelCase_ ( images ):
    images = (images / 2 + 0.5).clamp(0 , 1 )
    images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    images = numpy_to_pil(images )
    return images
def numpy_to_pil( images ):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("""uint8""" )
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
    else:
        pil_images = [Image.fromarray(image ) for image in images]
    return pil_images
| 650
| 1
|
from typing import Any
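# Returns the mode(s) of a list, sorted ascending. Illustrative calls
# (assuming this module is importable): UpperCAmelCase_([1, 2, 2, 3]) -> [2]
# and UpperCAmelCase_([1, 1, 2, 2]) -> [1, 2].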
def UpperCAmelCase_ ( input_list ):
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase__ ( SchedulerCommonTest ):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            """num_train_timesteps""": 1_000,
            """variance_type""": """fixed_small_log""",
            """clip_sample""": True,
            """clip_sample_range""": 1.0,
            """prediction_type""": """epsilon""",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ):
        '''simple docstring'''
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        '''simple docstring'''
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ):
        '''simple docstring'''
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ):
        '''simple docstring'''
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        '''simple docstring'''
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""fixed_small_log""" )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000e-10 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
    def test_variance_learned_range( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="""learned_range""" )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_712_790 < 1e-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7_998_052 < 1e-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0_010_011 < 1e-5
    def test_full_loop( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
    def test_full_loop_skip_timesteps( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
    def test_trained_betas( self ):
        '''simple docstring'''
        pass
    def test_add_noise_device( self ):
        '''simple docstring'''
        pass
| 650
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image( ):
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
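# The original vision encoder stores separate q and v biases and no k bias;
# the converted checkpoint expects a single fused qkv bias, so q, a zero
# block the size of v, and v are concatenated below.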
def read_in_q_v_bias( state_dict , config ):
    for i in range(config.vision_config.num_hidden_layers ):
        # read in original q and v biases
        q_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias , requires_grad=False ), v_bias) )
        state_dict[F'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
def get_blipa_config( model_name , eos_token_id ):
    image_size = 3_6_4 if """coco""" in model_name else 2_2_4
    vision_config = BlipaVisionConfig(image_size=image_size ).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=eos_token_id ).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
    config = BlipaConfig(vision_config=vision_config , text_config=text_config )
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint( model_name , pytorch_dump_folder_path=None , push_to_hub=False ):
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
    )
    eos_token_id = tokenizer("""\n""" , add_special_tokens=False ).input_ids[0]
    config , image_size = get_blipa_config(model_name , eos_token_id=eos_token_id )
    hf_model = BlipaForConditionalGeneration(config ).eval()
    model_name_to_original = {
        """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
        """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
        """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
        """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
        """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
        """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
        """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
    }
    name , type = model_name_to_original[model_name]
    # load original model
    print("""Loading original model...""" )
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model , vis_processors , _ = load_model_and_preprocess(
        name=name , model_type=type , is_eval=True , device=device )
    original_model.eval()
    print("""Done!""" )
    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key )
        if key.startswith("""Qformer.bert""" ):
            key = key.replace("""Qformer.bert""" , """qformer""" )
        if "attention.self" in key:
            key = key.replace("""self""" , """attention""" )
        if "opt_proj" in key:
            key = key.replace("""opt_proj""" , """language_projection""" )
        if "t5_proj" in key:
            key = key.replace("""t5_proj""" , """language_projection""" )
        if key.startswith("""opt""" ):
            key = key.replace("""opt""" , """language""" )
        if key.startswith("""t5""" ):
            key = key.replace("""t5""" , """language""" )
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict , config )
    missing_keys , unexpected_keys = hf_model.load_state_dict(state_dict , strict=False )
    assert len(missing_keys ) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["""eval"""](image ).unsqueeze(0 ).to(device )
    input_ids = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(device )
    # create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size} , image_mean=OPENAI_CLIP_MEAN , image_std=OPENAI_CLIP_STD )
    processor = BlipaProcessor(image_processor=image_processor , tokenizer=tokenizer )
    pixel_values = processor(images=image , return_tensors="""pt""" ).pixel_values.to(device )
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values , original_pixel_values )
    original_model.to(device )
    hf_model.to(device )
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
            logits = hf_model(pixel_values , input_ids ).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
            logits = hf_model(pixel_values , input_ids , labels=labels ).logits
    assert original_logits.shape == logits.shape
    print("""First values of original logits:""" , original_logits[0, :3, :3] )
    print("""First values of HF logits:""" , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=device )
        assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=device )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype ) , logits , atol=1e-2 )
    print("""Looks ok!""" )
    print("""Generating a caption...""" )
    prompt = """"""
    input_ids = tokenizer(prompt , return_tensors="""pt""" ).input_ids.to(device )
    original_outputs = original_model.generate({"""image""": original_pixel_values} )
    outputs = hf_model.generate(
        pixel_values , input_ids , do_sample=False , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
    print("""Original generation:""" , original_outputs )
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=True )
    output_text = [text.strip() for text in output_text]
    print("""HF generation:""" , output_text )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path )
        hf_model.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        processor.push_to_hub(F'''nielsr/{model_name}''' )
        hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
a = argparse.ArgumentParser()
a = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
a = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 650
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    dataset_config_name: Optional[str] = field(
        default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    image_column_name: Optional[str] = field(
        default=None , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
    train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
    validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
    train_val_split: Optional[float] = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    mask_patch_size: int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
    mask_ratio: float = field(
        default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )
    def __post_init__( self ):
        '''simple docstring'''
        data_files = {}
        if self.train_dir is not None:
            data_files["""train"""] = self.train_dir
        if self.validation_dir is not None:
            data_files["""validation"""] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None , metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                'Don\'t set if you want to train a model from scratch.'
            )
        } , )
    model_type: Optional[str] = field(
        default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
    config_name_or_path: Optional[str] = field(
        default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    config_overrides: Optional[str] = field(
        default=None , metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    cache_dir: Optional[str] = field(
        default=None , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
    model_revision: str = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    image_processor_name: str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
    use_auth_token: bool = field(
        default=False , metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    image_size: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        } , )
    patch_size: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        } , )
    encoder_stride: Optional[int] = field(
        default=None , metadata={'help': 'Stride to use for the encoder.'} , )
class MaskGenerator:
    def __init__( self , input_size=192 , mask_patch_size=32 , model_patch_size=4 , mask_ratio=0.6 ):
        '''simple docstring'''
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("""Input size must be divisible by mask patch size""" )
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("""Mask patch size must be divisible by model patch size""" )
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio ) )
    def __call__( self ):
        '''simple docstring'''
        mask_idx = np.random.permutation(self.token_count )[: self.mask_count]
        mask = np.zeros(self.token_count , dtype=int )
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size) )
        mask = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
        return torch.tensor(mask.flatten() )
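# Illustrative behaviour with the defaults above: rand_size = 192 // 32 = 6,
# so token_count = 36 and mask_count = ceil(36 * 0.6) = 22; each coarse patch
# is upscaled by scale = 32 // 4 = 8, so the returned flat tensor has
# 48 * 48 = 2304 entries.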
def collate_fn( examples ):
    pixel_values = torch.stack([example["""pixel_values"""] for example in examples] )
    mask = torch.stack([example["""mask"""] for example in examples] )
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = ds["""train"""].train_test_split(data_args.train_val_split )
        ds["""train"""] = split["""train"""]
        ds["""validation"""] = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
        """cache_dir""": model_args.cache_dir,
        """revision""": model_args.model_revision,
        """use_auth_token""": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path , **config_kwargs )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config , """decoder_type""" ):
        config.decoder_type = """simmim"""
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **config_kwargs )
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **config_kwargs )
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info("""Training new model from scratch""" )
        model = AutoModelForMaskedImageModeling.from_config(config )
    if training_args.do_train:
        column_names = ds["""train"""].column_names
    else:
        column_names = ds["""validation"""].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = """image"""
    elif "img" in column_names:
        image_column_name = """img"""
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
[
Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowercase_ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(UpperCAmelCase__ ):
lowercase_ = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
lowercase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
        if data_args.max_train_samples is not None:
            ds["""train"""] = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(preprocess_images )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
        if data_args.max_eval_samples is not None:
            ds["""validation"""] = (
                ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images )
# Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=image_processor , data_collator=collate_fn , )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
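
# Example invocation for the masked-image-modeling script above. The flag
# names follow the standard run_mim.py argument parser; treat this as an
# illustrative sketch rather than a verified command line:
#
#   python run_mim.py \
#       --dataset_name cifar10 \
#       --model_type vit \
#       --output_dir ./outputs/simmim \
#       --do_train \
#       --do_eval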
import math
def real_power(apparent_power , power_factor ):
    if (
        not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def reactive_power(apparent_power , power_factor ):
    if (
        not isinstance(power_factor , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
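
# Usage sketch for the two helpers above (the names real_power and
# reactive_power are reconstructed from the function bodies): a 100 VA load
# at a power factor of 0.9 dissipates 90 W and exchanges ~43.59 VAR.
print(real_power(100 , 0.9 ))      # 90.0
print(reactive_power(100 , 0.9 ))  # 43.5889...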
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor ):
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , image_std : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **kwargs , ):
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["""shortest_edge"""] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {"""height""": output_size[0], """width""": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
        return resize(
            image , size=(size_dict["""height"""], size_dict["""width"""]) , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = None , do_center_crop : Optional[bool] = None , crop_size : Optional[Dict[str, int]] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, Iterable[float]]] = None , image_std : Optional[Union[float, Iterable[float]]] = None , return_tensors : Optional[TensorType] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
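
# Usage sketch for the processor above (a LeViT-style pipeline: resize the
# shortest edge to 256/224 of the target, then center-crop to 224x224). The
# class name is reconstructed and the image path is a placeholder:
#
#     from PIL import Image
#     processor = LevitImageProcessor()
#     batch = processor(Image.open("cat.png"), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224)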
def speed_of_sound_in_a_fluid(density , bulk_modulus ):
    if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
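
# Usage sketch (the function name is reconstructed from the body): water has
# a density of ~998 kg/m^3 and a bulk modulus of ~2.15e9 Pa, giving a speed
# of sound of roughly 1468 m/s, close to the measured ~1480 m/s.
print(speed_of_sound_in_a_fluid(998 , 2.15e9 ))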
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
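
# The module above only materialises its submodules on first attribute
# access. A minimal sketch of that lazy-import pattern (illustrative names,
# not the transformers internals):
import importlib

class MiniLazyModule:
    def __init__(self , package , import_structure ):
        self._package = package
        # map each public attribute to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self , attr ):
        # import the owning submodule relative to the package, then pull the attribute
        submodule = importlib.import_module("." + self._attr_to_module[attr] , self._package )
        return getattr(submodule , attr )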
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speecht5'] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase_ = value
elif weight_type == "weight_g":
lowercase_ = value
elif weight_type == "weight_v":
lowercase_ = value
elif weight_type == "bias":
lowercase_ = value
else:
lowercase_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        elif name.split(""".""" )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path ):
    with open(dict_path , """r""" , encoding="""utf-8""" ) as f:
        lines = f.readlines()
    words = [line.split(""" """ )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint(checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove("""embed_out""" )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
    logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) , """w""" ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , """vocab.json""" ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["""pad_token_id"""] = tokenizer.pad_token_id
    config["""bos_token_id"""] = tokenizer.bos_token_id
    config["""eos_token_id"""] = tokenizer.eos_token_id
    config["""tokenizer_class"""] = """speech_to_text_2"""
    config["""feature_extractor_type"""] = """wav2vec2"""
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
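
# Example invocation for the conversion script above, using the argument
# names its parser defines (the script filename and all paths are
# placeholders):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-s2t \
#       --vocab_size 10224 \
#       --num_decoder_layers 7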
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
    @require_torch
    def test_1b_lyrics_tokenizer( self ):
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" )
        tokens = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer( self ):
        import torch
        tokenizer = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" )
        tokens = tokenizer(**self.metas )["""input_ids"""]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig ):
    model_type = """esm"""
    def __init__( self , vocab_size=None , mask_token_id=None , pad_token_id=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_026 , initializer_range=0.02 , layer_norm_eps=1e-12 , position_embedding_type="absolute" , use_cache=True , emb_layer_norm_before=None , token_dropout=False , is_folding_model=False , esmfold_config=None , vocab_list=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("""No esmfold_config supplied for folding model, using default values.""" )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , UpperCamelCase__ ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
    def to_dict( self ):
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["""esmfold_config"""] = self.esmfold_config.to_dict()
return output
@dataclass
class EsmFoldConfig:
    esm_type : str = None
    fp16_esm : bool = True
    use_esm_attn_map : bool = False
    esm_ablate_pairwise : bool = False
    esm_ablate_sequence : bool = False
    esm_input_dropout : float = 0
    embed_aa : bool = True
    bypass_lm : bool = False
    lddt_head_hid_dim : int = 128
    trunk : "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        output = asdict(self )
        output["""trunk"""] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks : int = 48
    sequence_state_dim : int = 1024
    pairwise_state_dim : int = 128
    sequence_head_width : int = 32
    pairwise_head_width : int = 32
    position_bins : int = 32
    dropout : float = 0
    layer_drop : float = 0
    cpu_grad_checkpoint : bool = False
    max_recycles : int = 4
    chunk_size : Optional[int] = 128
    structure_module : "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def to_dict( self ):
        output = asdict(self )
        output["""structure_module"""] = self.structure_module.to_dict()
return output
@dataclass
class StructureModuleConfig:
    sequence_dim : int = 384
    pairwise_dim : int = 128
    ipa_dim : int = 16
    resnet_dim : int = 128
    num_heads_ipa : int = 12
    num_qk_points : int = 4
    num_v_points : int = 8
    dropout_rate : float = 0.1
    num_blocks : int = 8
    num_transition_layers : int = 1
    num_resnet_blocks : int = 2
    num_angles : int = 7
    trans_scale_factor : int = 10
    epsilon : float = 1e-8
    inf : float = 1e5
    def to_dict( self ):
        return asdict(self )
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
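
# Usage sketch: a folding-model config wires together the nested dataclasses
# above, and to_dict() flattens them recursively (the values here are
# illustrative, not a real checkpoint's).
config = EsmConfig(vocab_size=33 , is_folding_model=True , esmfold_config={"""trunk""": {"""num_blocks""": 4}} )
assert config.esmfold_config.trunk.num_blocks == 4
assert isinstance(config.to_dict()["""esmfold_config"""] , dict )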
from __future__ import annotations
def prime_sieve(limit ):
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def solution(ceiling = 1_000_000 ):
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
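
# Usage sketch: prime_sieve(limit) returns every prime below the limit, and
# solution() answers Project Euler 50 (the prime under the ceiling that is
# the sum of the most consecutive primes).
print(prime_sieve(30 ))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]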
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command(args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": F'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
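
# The function above backs the `accelerate env` subcommand; the module can
# also be executed directly to print a GitHub-issue-ready report:
#
#   accelerate env
#   # or, assuming the upstream module layout:
#   python -m accelerate.commands.env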
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig ):
    model_type = """bert-generation"""
    def __init__( self , vocab_size=50_358 , hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4_096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
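
# Usage sketch: the defaults above match the large bert-for-seq-generation
# checkpoints; overriding a couple of fields yields a smaller decoder
# (values here are illustrative).
config = BertGenerationConfig(num_hidden_layers=6 , use_cache=False )
assert config.hidden_size == 1_024 and not config.use_cache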
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""DeiT does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        pass
    def test_model_common_attributes( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
                    config.problem_type = problem_type["""title"""]
                    config.num_labels = problem_type["""num_labels"""]
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs["""labels"""] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
                    inputs["""labels"""] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
                    loss.backward()
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
UpperCamelCase__ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCamelCase__ )
# verify the logits
lowercase_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowercase_ = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ )
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCAmelCase_ ( UpperCAmelCase__ ):
    if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(torch , """_dynamo""" ):
return False
return isinstance(UpperCAmelCase__ , torch._dynamo.eval_frame.OptimizedModule )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ = True ):
lowercase_ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowercase_ = is_compiled_module(UpperCAmelCase__ )
if is_compiled:
lowercase_ = model
lowercase_ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = model.module
if not keep_fpaa_wrapper:
lowercase_ = getattr(UpperCAmelCase__ , """forward""" )
lowercase_ = model.__dict__.pop("""_original_forward""" , UpperCAmelCase__ )
if original_forward is not None:
while hasattr(UpperCAmelCase__ , """__wrapped__""" ):
lowercase_ = forward.__wrapped__
if forward == original_forward:
break
lowercase_ = forward
if getattr(UpperCAmelCase__ , """_converted_to_transformer_engine""" , UpperCAmelCase__ ):
convert_model(UpperCAmelCase__ , to_transformer_engine=UpperCAmelCase__ )
if is_compiled:
lowercase_ = model
lowercase_ = compiled_model
return model
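# Usage sketch for the unwrap helper above (accelerate's public name for it
# is `extract_model_from_parallel`; the mangled def above hides that). The
# pattern: wrap a model for distributed training, peel the wrapper back off
# before saving or inspecting it.
#
#     base = torch.nn.Linear(4, 4)
#     wrapped = torch.nn.DataParallel(base)
#     assert extract_model_from_parallel(wrapped) is base   # wrapper removed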
def UpperCAmelCase_ ( ):
PartialState().wait_for_everyone()
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(UpperCAmelCase__ , UpperCAmelCase__ )
elif PartialState().local_process_index == 0:
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
@contextmanager
def UpperCAmelCase_ ( **UpperCAmelCase__ ):
for key, value in kwargs.items():
        os.environ[key.upper()] = str(value )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
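# Usage sketch for the context manager above (accelerate exposes it as
# `patch_environment`): keys are upper-cased into os.environ for the body of
# the `with` block and removed again afterwards.
#
#     with patch_environment(master_port="29501"):
#         assert os.environ["MASTER_PORT"] == "29501"
#     assert "MASTER_PORT" not in os.environ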
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if not hasattr(UpperCAmelCase__ , """__qualname__""" ) and not hasattr(UpperCAmelCase__ , """__name__""" ):
lowercase_ = getattr(UpperCAmelCase__ , """__class__""" , UpperCAmelCase__ )
if hasattr(UpperCAmelCase__ , """__qualname__""" ):
return obj.__qualname__
if hasattr(UpperCAmelCase__ , """__name__""" ):
return obj.__name__
return str(UpperCAmelCase__ )
def merge_dicts( source , destination ):
    for key, value in source.items():
        if isinstance(value , dict ):
            node = destination.setdefault(key , {} )
            merge_dicts(value , node )
        else:
            destination[key] = value
    return destination
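# Worked example for merge_dicts: nested dict keys from `source` are merged
# recursively into `destination`, while scalar values overwrite in place.
base_cfg = {"zero_optimization": {"stage": 2, "offload": False}}
override = {"zero_optimization": {"stage": 3}, "fp16": {"enabled": True}}
assert merge_dicts(override, base_cfg) == {
    "zero_optimization": {"stage": 3, "offload": False},
    "fp16": {"enabled": True},
}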
def is_port_in_use( port=None ):
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
        return s.connect_ex(("""localhost""", port) ) == 0
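# Quick self-check for is_port_in_use: bind a throwaway listener, then probe
# its port (29500 above is torch.distributed's default rendezvous port).
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
    srv.bind(("localhost", 0))
    srv.listen()
    assert is_port_in_use(srv.getsockname()[1])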
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a = get_logger()
a = None
class UpperCamelCase__ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : Optional[int] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Any ):
'''simple docstring'''
super().__init__(features=UpperCamelCase__ )
import jax
from jaxlib.xla_client import Device
        if isinstance(UpperCamelCase__ , Device ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(UpperCamelCase__ )}, as `jaxlib.xla_extension.Device` '''
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
lowercase_ = device if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
lowercase_ = str(jax.devices()[0] )
lowercase_ = jnp_array_kwargs
@staticmethod
def UpperCAmelCase__ ( ):
'''simple docstring'''
import jax
return {str(UpperCamelCase__ ): device for device in jax.devices()}
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and column:
if all(
isinstance(UpperCamelCase__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCamelCase__ , axis=0 )
return column
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
import jax
import jax.numpy as jnp
        if isinstance(UpperCamelCase__ , (str, bytes, type(None )) ):
return value
elif isinstance(UpperCamelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
        default_dtype = {}
        if isinstance(UpperCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"""dtype""": jnp.int64}
            else:
                default_dtype = {"""dtype""": jnp.int32}
        elif isinstance(UpperCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"""dtype""": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase__ , PIL.Image.Image ):
lowercase_ = np.asarray(UpperCamelCase__ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowercase_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCamelCase__ , **{**default_dtype, **self.jnp_array_kwargs} )
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCamelCase__ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCamelCase__ , """__array__""" ) and not isinstance(UpperCamelCase__ , jax.Array ):
lowercase_ = data_struct.__array__()
# support for nested types like struct of list of struct
        if isinstance(UpperCamelCase__ , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(UpperCamelCase__ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : dict ):
'''simple docstring'''
return map_nested(self._recursive_tensorize , UpperCamelCase__ , map_list=UpperCamelCase__ )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : pa.Table ):
'''simple docstring'''
lowercase_ = self.numpy_arrow_extractor().extract_row(UpperCamelCase__ )
lowercase_ = self.python_features_decoder.decode_row(UpperCamelCase__ )
return self.recursive_tensorize(UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : pa.Table ):
'''simple docstring'''
lowercase_ = self.numpy_arrow_extractor().extract_column(UpperCamelCase__ )
lowercase_ = self.python_features_decoder.decode_column(UpperCamelCase__ , pa_table.column_names[0] )
lowercase_ = self.recursive_tensorize(UpperCamelCase__ )
lowercase_ = self._consolidate(UpperCamelCase__ )
return column
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : pa.Table ):
'''simple docstring'''
lowercase_ = self.numpy_arrow_extractor().extract_batch(UpperCamelCase__ )
lowercase_ = self.python_features_decoder.decode_batch(UpperCamelCase__ )
lowercase_ = self.recursive_tensorize(UpperCamelCase__ )
for column_name in batch:
lowercase_ = self._consolidate(batch[column_name] )
return batch
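# Sketch of how the formatter above is reached through the public `datasets`
# API (assumes jax is installed):
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#     ds[0]["x"]   # jax Array([1, 2]); int32 or int64 depending on jax_enable_x64
#     ds["x"]      # equally-shaped rows are stacked into one (2, 2) array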
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : int = True
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 1_008 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowercase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase__ , f.name )
lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ )
lowercase_ = pickle.dumps(UpperCamelCase__ )
pickle.loads(UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(UpperCamelCase__ )
lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """Hello World!"""
lowercase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        # fmt: off
        lowercase_ = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ ):
@register_to_config
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : bool = False , ):
'''simple docstring'''
super().__init__()
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = False
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
lowercase_ = TaConfig(
vocab_size=UpperCamelCase__ , d_model=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_kv=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , feed_forward_proj=UpperCamelCase__ , is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , )
lowercase_ = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
lowercase_ = TaBlock(UpperCamelCase__ )
self.encoders.append(UpperCamelCase__ )
lowercase_ = TaLayerNorm(UpperCamelCase__ )
lowercase_ = nn.Dropout(p=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = self.token_embedder(UpperCamelCase__ )
lowercase_ = encoder_input_tokens.shape[1]
lowercase_ = torch.arange(UpperCamelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase__ )
lowercase_ = self.dropout_pre(UpperCamelCase__ )
        # invert the attention mask
lowercase_ = encoder_input_tokens.size()
lowercase_ = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ )
for lyr in self.encoders:
lowercase_ = lyr(UpperCamelCase__ , UpperCamelCase__ )[0]
lowercase_ = self.layer_norm(UpperCamelCase__ )
return self.dropout_post(UpperCamelCase__ ), encoder_inputs_mask
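# Shape sketch for the encoder above (in diffusers this is the spectrogram
# notes encoder; the class and argument names are mangled here, so the
# keyword names and config values below are assumptions, not the shipped ones):
#
#     enc = NotesEncoder(max_length=32, vocab_size=128, d_model=64,
#                        dropout_rate=0.1, num_layers=2, num_heads=4,
#                        d_kv=16, d_ff=128, feed_forward_proj="gated-gelu")
#     tokens = torch.randint(0, 128, (1, 32))
#     mask = torch.ones(1, 32, dtype=torch.bool)
#     out, out_mask = enc(tokens, mask)   # out: (1, 32, 64)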
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Dict = TaTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase_ = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id_""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
lowercase_ = extra_ids
@staticmethod
    def UpperCAmelCase__ ( pretrained_model_name_or_path : Tuple , max_model_length : str , init_max_model_length : int ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , )
return max_model_length
    def UpperCAmelCase__ ( self : int , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowercase_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
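# Behaviour sketch for the methods above (token ids are made up for clarity):
#
#     build_inputs_with_special_tokens([10, 11])           -> prefix + [10, 11, eos]
#     build_inputs_with_special_tokens([10, 11], [20])     -> prefix + [10, 11, eos, 20, eos]
#     create_token_type_ids_from_sequences([10, 11], [20]) -> [0, 0, 0, 0, 0]  (T5 uses all zeros)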
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
a = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : Union[str, Any] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
if self.framework == "tf":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , """vision""" )
self.check_model_type(UpperCamelCase__ )
    def __call__( self : Dict , image : Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels : Union[str, List[str]] = None , **kwargs : Any , ):
        '''simple docstring'''
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("""text_queries""" )
        if isinstance(image , (str, Image.Image) ):
            inputs = {"""image""": image, """candidate_labels""": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def UpperCAmelCase__ ( self : int , **kwargs : Tuple ):
        '''simple docstring'''
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["""threshold"""] = kwargs["""threshold"""]
        if "top_k" in kwargs:
            postprocess_params["""top_k"""] = kwargs["""top_k"""]
        return {}, {}, postprocess_params
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
lowercase_ = load_image(inputs["""image"""] )
lowercase_ = inputs["""candidate_labels"""]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = candidate_labels.split(""",""" )
        lowercase_ = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
for i, candidate_label in enumerate(UpperCamelCase__ ):
lowercase_ = self.tokenizer(UpperCamelCase__ , return_tensors=self.framework )
lowercase_ = self.image_processor(UpperCamelCase__ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Any ):
'''simple docstring'''
lowercase_ = model_inputs.pop("""target_size""" )
lowercase_ = model_inputs.pop("""candidate_label""" )
lowercase_ = model_inputs.pop("""is_last""" )
lowercase_ = self.model(**UpperCamelCase__ )
lowercase_ = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[str]=None ):
'''simple docstring'''
lowercase_ = []
for model_output in model_outputs:
lowercase_ = model_output["""candidate_label"""]
lowercase_ = BaseModelOutput(UpperCamelCase__ )
lowercase_ = self.image_processor.post_process_object_detection(
outputs=UpperCamelCase__ , threshold=UpperCamelCase__ , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
lowercase_ = outputs["""scores"""][index].item()
lowercase_ = self._get_bounding_box(outputs["""boxes"""][index][0] )
lowercase_ = {"""score""": score, """label""": label, """box""": box}
results.append(UpperCamelCase__ )
        lowercase_ = sorted(UpperCamelCase__ , key=lambda x : x["score"] , reverse=True )
if top_k:
lowercase_ = results[:top_k]
return results
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = box.int().tolist()
lowercase_ = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
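# Public-API sketch of the pipeline implemented above; the checkpoint name is
# the usual OWL-ViT example, not a requirement:
#
#     from transformers import pipeline
#     detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector("cats.png", candidate_labels=["cat", "remote control"])
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]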
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase_ = raw_image
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class UpperCamelCase__ ( __magic_name__ ):
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = SMALL_MODEL_IDENTIFIER
lowercase_ = """pt"""
lowercase_ = """tf"""
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase_ = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase__ )
model_tf.save_pretrained(UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = """mock_framework"""
# Framework provided - return whatever the user provides
lowercase_ = FeaturesManager.determine_framework(self.test_model , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase__ )
lowercase_ = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase__ )
lowercase_ = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(UpperCamelCase__ )
lowercase_ = FeaturesManager.determine_framework(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(UpperCamelCase__ )
lowercase_ = FeaturesManager.determine_framework(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(UpperCamelCase__ ):
lowercase_ = FeaturesManager.determine_framework(UpperCamelCase__ )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = MagicMock(return_value=UpperCamelCase__ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase__ ):
lowercase_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase_ = MagicMock(return_value=UpperCamelCase__ )
with patch("""transformers.onnx.features.is_torch_available""" , UpperCamelCase__ ):
lowercase_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_tf )
# Both in environment -> use PyTorch
lowercase_ = MagicMock(return_value=UpperCamelCase__ )
lowercase_ = MagicMock(return_value=UpperCamelCase__ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase__ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase__ ):
lowercase_ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(UpperCamelCase__ , self.framework_pt )
# Both not in environment -> raise error
lowercase_ = MagicMock(return_value=UpperCamelCase__ )
lowercase_ = MagicMock(return_value=UpperCamelCase__ )
with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase__ ), patch(
"""transformers.onnx.features.is_torch_available""" , UpperCamelCase__ ):
with self.assertRaises(UpperCamelCase__ ):
lowercase_ = FeaturesManager.determine_framework(self.test_model )
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : str = ['pixel_values']
def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_pad
lowercase_ = pad_size
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
lowercase_ = (old_height // size + 1) * size - old_height
lowercase_ = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_pad if do_pad is not None else self.do_pad
lowercase_ = pad_size if pad_size is not None else self.pad_size
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_pad:
lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
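# Worked example of the pad arithmetic in the `pad` method above: with
# pad_size=8 the image is padded up to the next multiple of 8 that is
# strictly larger, so at least one extra row/column is always added.
old_height, size = 250, 8
pad_height = (old_height // size + 1) * size - old_height
assert pad_height == 6 and old_height + pad_height == 256
# Note a 256-high input still gains (256 // 8 + 1) * 8 - 256 == 8 rows.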
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise TypeError("""Input value must be an 'int' type""" )
lowercase_ = 0
while number:
position += 1
number >>= 1
return position
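# Trace for number = 32 (0b100000): six right-shifts reach zero, so the
# function returns the bit length of the input.
#     32 -> 16 -> 8 -> 4 -> 2 -> 1 -> 0   => position == 6 == (32).bit_length()
# For number = 0 the loop never runs and 0 is returned.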
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
def UpperCAmelCase_ ( UpperCAmelCase__ ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
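# The expression above is an algebraic form of tanh: 2 / (1 + e^(-2x)) - 1.
# Spot check against numpy's built-in:
assert np.allclose(
    (2 / (1 + np.exp(-2 * np.array([-1.0, 0.0, 1.0])))) - 1,
    np.tanh([-1.0, 0.0, 1.0]),
)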
if __name__ == "__main__":
import doctest
doctest.testmod()
from math import sqrt
def is_prime( number ):
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must been from type bool"
    return status
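# Spot checks: trial division up to sqrt(number).
assert is_prime(2) and is_prime(13)
assert not is_prime(1) and not is_prime(21)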
def sieve_er( n ):
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
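# Example: the sieve strikes every multiple, keeping only the primes.
assert sieve_er(20) == [2, 3, 5, 7, 11, 13, 17, 19]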
def get_prime_numbers( n ):
    assert isinstance(n , int ) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
def prime_factorization( number ):
    assert isinstance(number , int ) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must been from type list"
    return ans
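# Example: 60 = 2 * 2 * 3 * 5 (factors are emitted in ascending order).
assert prime_factorization(60) == [2, 2, 3, 5]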
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase_ = 0
# prime factorization of 'number'
lowercase_ = prime_factorization(UpperCAmelCase__ )
lowercase_ = max(UpperCAmelCase__ )
# precondition
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), "'ans' must been from type int"
return ans
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
lowercase_ = 0
# prime factorization of 'number'
lowercase_ = prime_factorization(UpperCAmelCase__ )
lowercase_ = min(UpperCAmelCase__ )
# precondition
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), "'ans' must been from type int"
return ans
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , UpperCAmelCase__ ), "compare bust been from type bool"
return number % 2 == 0
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , UpperCAmelCase__ ), "compare bust been from type bool"
return number % 2 != 0
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (number > 2) and is_even(UpperCAmelCase__ )
), "'number' must be an int, even and > 2"
lowercase_ = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
lowercase_ = get_prime_numbers(UpperCAmelCase__ )
lowercase_ = len(UpperCAmelCase__ )
# loop counters for the while-loops.
lowercase_ = 0
lowercase_ = None
# exit flag used to break out of the loops
lowercase_ = True
while i < len_pn and loop:
lowercase_ = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
lowercase_ = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and (len(UpperCAmelCase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
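# --- illustrative sketch (not part of the original module) ---
# Goldbach decomposition without the nested index loops: try each prime p and
# test whether number - p is also prime. Uses 'is_prime_clean' from the
# sketch above; 'goldbach_clean' is a hypothetical name.
def goldbach_clean(number: int) -> list:
    for p in range(2, number // 2 + 1):
        if is_prime_clean(p) and is_prime_clean(number - p):
            return [p, number - p]
    return []
# goldbach_clean(28) -> [5, 23]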
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
lowercase_ = 0
while numbera != 0:
lowercase_ = numbera % numbera
lowercase_ = numbera
lowercase_ = rest
# precondition
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (
numbera >= 0
), "'number' must be of type int and positive"
return numbera
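# --- illustrative sketch (not part of the original module) ---
# The Euclidean algorithm that the function above implements, with the
# remainder swap written out explicitly ('gcd_clean' is a hypothetical name):
def gcd_clean(a: int, b: int) -> int:
    while b != 0:
        a, b = b, a % b
    return a
# gcd_clean(48, 36) -> 12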
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
lowercase_ = 1 # actual answer that will be return.
# handle lcm(x, 1) ('kgV' is German for least common multiple)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
lowercase_ = prime_factorization(UpperCAmelCase__ )
lowercase_ = prime_factorization(UpperCAmelCase__ )
elif numbera == 1 or numbera == 1:
lowercase_ = []
lowercase_ = []
lowercase_ = max(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ = 0
lowercase_ = 0
lowercase_ = [] # numbers already counted in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
lowercase_ = prime_fac_a.count(UpperCAmelCase__ )
lowercase_ = prime_fac_a.count(UpperCAmelCase__ )
for _ in range(max(UpperCAmelCase__ , UpperCAmelCase__ ) ):
ans *= n
else:
lowercase_ = prime_fac_a.count(UpperCAmelCase__ )
for _ in range(UpperCAmelCase__ ):
ans *= n
done.append(UpperCAmelCase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
lowercase_ = prime_fac_a.count(UpperCAmelCase__ )
for _ in range(UpperCAmelCase__ ):
ans *= n
done.append(UpperCAmelCase__ )
# precondition
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
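# --- illustrative sketch (not part of the original module) ---
# The least common multiple can also be computed from the identity
# lcm(a, b) = a * b // gcd(a, b), avoiding the prime-factorisation
# bookkeeping above ('lcm_clean' is hypothetical and uses 'gcd_clean' from
# the sketch above):
def lcm_clean(a: int, b: int) -> int:
    return a * b // gcd_clean(a, b)
# lcm_clean(12, 18) -> 36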
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (n >= 0), "'number' must been a positive int"
lowercase_ = 0
lowercase_ = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(UpperCAmelCase__ ):
ans += 1
# precondition
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and is_prime(
UpperCAmelCase__ ), "'ans' must be a prime number and of type int"
return ans
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
assert (
is_prime(UpperCAmelCase__ ) and is_prime(UpperCAmelCase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
lowercase_ = p_number_a + 1 # jump to the next number
lowercase_ = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(UpperCAmelCase__ ):
number += 1
while number < p_number_a:
ans.append(UpperCAmelCase__ )
number += 1
# fetch the next prime number.
while not is_prime(UpperCAmelCase__ ):
number += 1
# precondition
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and ans[0] != p_number_a
and ans[len(UpperCAmelCase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (n >= 1), "'n' must been int and >= 1"
lowercase_ = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(UpperCAmelCase__ )
# precondition
assert ans[0] == 1 and ans[len(UpperCAmelCase__ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (
number > 1
), "'number' must be an int and > 1"
lowercase_ = get_divisors(UpperCAmelCase__ )
# precondition
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and (divisors[0] == 1)
and (divisors[len(UpperCAmelCase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# sum all proper divisors, i.e. every divisor of 'number' except itself, hence [:-1]
return sum(divisors[:-1] ) == number
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
lowercase_ = gcd(abs(UpperCAmelCase__ ) , abs(UpperCAmelCase__ ) )
# precondition
assert (
isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (n >= 0), "'n' must been a int and >= 0"
lowercase_ = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def UpperCAmelCase_ ( UpperCAmelCase__ ):
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and (n >= 0), "'n' must been an int and >= 0"
lowercase_ = 0
lowercase_ = 1
lowercase_ = 1 # this will be returned
for _ in range(n - 1 ):
lowercase_ = ans
ans += fiba
lowercase_ = tmp
return ans
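# --- illustrative sketch (not part of the original module) ---
# Iterative Fibonacci with explicit state; mirrors the loop above, which
# yields 1, 2, 3, 5, 8, ... for n = 1, 2, 3, 4, 5 ('fib_clean' is a
# hypothetical name).
def fib_clean(n: int) -> int:
    prev, curr = 1, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr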
| 650
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a = TypeVar('T')
class UpperCamelCase__ ( Generic[T] ):
__SCREAMING_SNAKE_CASE : deque[T] # Cache store of keys
__SCREAMING_SNAKE_CASE : set[T] # References of the keys in cache
__SCREAMING_SNAKE_CASE : int = 10 # Maximum capacity of cache
def __init__( self : str , UpperCamelCase__ : int ):
'''simple docstring'''
lowercase_ = deque()
lowercase_ = set()
if not n:
lowercase_ = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
lowercase_ = n
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : T ):
'''simple docstring'''
if x not in self.key_reference:
if len(self.dq_store ) == self._MAX_CAPACITY:
lowercase_ = self.dq_store.pop()
self.key_reference.remove(UpperCamelCase__ )
else:
self.dq_store.remove(UpperCamelCase__ )
self.dq_store.appendleft(UpperCamelCase__ )
self.key_reference.add(UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
for k in self.dq_store:
print(UpperCamelCase__ )
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
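# --- illustrative sketch (not part of the original module) ---
# The same eviction policy built on collections.OrderedDict, which gives
# O(1) "move to front" instead of the O(n) deque.remove() scan above.
# 'LRUCacheSketch' is a hypothetical name, not part of the original code.
from collections import OrderedDict

class LRUCacheSketch:
    def __init__(self, capacity: int = 10):
        self.capacity = capacity or sys.maxsize  # 0 means "unbounded", as above
        self.store = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)        # mark as most recently used
        elif len(self.store) >= self.capacity:
            self.store.popitem(last=False)     # evict the least recently used
        self.store[key] = True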
if __name__ == "__main__":
import doctest
doctest.testmod()
a = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 650
| 1
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
a = logging.getLogger(__name__)
torch.set_grad_enabled(False)
a = 'cuda' if torch.cuda.is_available() else 'cpu'
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__=1_0_0 , UpperCAmelCase__=" " ):
lowercase_ = text.split(UpperCAmelCase__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(UpperCAmelCase__ ) , UpperCAmelCase__ )]
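# --- illustrative sketch (not part of the original script) ---
# What the helper above computes: chunk a text into passages of at most n
# words ('split_into_passages' is a hypothetical name with the obfuscated
# parameters spelled out):
def split_into_passages(text, n=100, character=" "):
    words = text.split(character)
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]
# split_into_passages("a b c d e", n=2) -> ['a b', 'c d', 'e']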
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ , lowercase_ = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(UpperCAmelCase__ ):
titles.append(title if title is not None else """""" )
texts.append(UpperCAmelCase__ )
return {"title": titles, "text": texts}
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=UpperCAmelCase__ , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
lowercase_ = ctx_encoder(input_ids.to(device=UpperCAmelCase__ ) , return_dict=UpperCAmelCase__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
######################################
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowercase_ = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowercase_ = dataset.map(UpperCAmelCase__ , batched=UpperCAmelCase__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowercase_ = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=UpperCAmelCase__ )
lowercase_ = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowercase_ = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
lowercase_ = dataset.map(
partial(UpperCAmelCase__ , ctx_encoder=UpperCAmelCase__ , ctx_tokenizer=UpperCAmelCase__ ) , batched=UpperCAmelCase__ , batch_size=processing_args.batch_size , features=UpperCAmelCase__ , )
# And finally save your dataset
lowercase_ = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(UpperCAmelCase__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowercase_ = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=UpperCAmelCase__ )
# And save the index
lowercase_ = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(UpperCAmelCase__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
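# --- illustrative sketch (not part of the original script) ---
# A hedged, minimal version of the indexing step above with random vectors;
# d=768 and m=128 mirror the IndexHnswArguments defaults declared below:
# import numpy as np
# index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT)
# index.add(np.random.rand(1_000, 768).astype("float32"))
# scores, ids = index.search(np.random.rand(1, 768).astype("float32"), 5)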
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : str = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
__SCREAMING_SNAKE_CASE : str = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
__SCREAMING_SNAKE_CASE : str = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=__magic_name__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
__SCREAMING_SNAKE_CASE : int = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : int = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
__SCREAMING_SNAKE_CASE : int = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
a = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
a , a , a = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
a = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 650
|
def UpperCAmelCase_ ( input_str ):
'''
Return 'input_str' with the order of its words reversed.

>>> UpperCAmelCase_('I love Python')
'Python love I'
'''
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : str = ['flax']
def __init__( self : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Dict ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['flax']
def __init__( self : Dict , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *UpperCamelCase__ : Any , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : int ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : Dict = ['flax']
def __init__( self : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : str = ['flax']
def __init__( self : Union[str, Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : int ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = ['flax']
def __init__( self : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : int = ['flax']
def __init__( self : Tuple , *UpperCamelCase__ : int , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *UpperCamelCase__ : int , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : List[Any] = ['flax']
def __init__( self : str , *UpperCamelCase__ : int , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = ['flax']
def __init__( self : int , *UpperCamelCase__ : Any , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : Dict = ['flax']
def __init__( self : Union[str, Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Dict ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Any ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : Any = ['flax']
def __init__( self : Tuple , *UpperCamelCase__ : str , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : Dict = ['flax']
def __init__( self : Dict , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *UpperCamelCase__ : Tuple , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['flax']
def __init__( self : str , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
class UpperCamelCase__ ( metaclass=__magic_name__ ):
__SCREAMING_SNAKE_CASE : List[str] = ['flax']
def __init__( self : str , *UpperCamelCase__ : int , **UpperCamelCase__ : str ):
'''simple docstring'''
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Tuple ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *UpperCamelCase__ : int , **UpperCamelCase__ : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["""flax"""] )
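# --- illustrative sketch (not part of the original module) ---
# Every class above follows the same "dummy object" pattern: it stands in for
# a Flax-backed class when Flax is not installed and raises a readable error
# on any use. A minimal, self-contained version of the idea (the names below
# are hypothetical, not the real transformers helpers):
_FLAX_AVAILABLE = False  # the real check probes importlib for the package

def _requires_backends_sketch(obj, backends):
    if "flax" in backends and not _FLAX_AVAILABLE:
        name = obj if isinstance(obj, str) else type(obj).__name__
        raise ImportError(f"{name} requires the 'flax' backend; run `pip install flax`.")

class _DummyFlaxModelSketch:
    def __init__(self, *args, **kwargs):
        _requires_backends_sketch(self, ["flax"])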
| 650
|
def UpperCAmelCase_ ( UpperCAmelCase__=2_8_1_2_3 ):
lowercase_ = [1] * (limit + 1)
for i in range(2 , int(limit**0.5 ) + 1 ):
sum_divs[i * i] += i
for k in range(i + 1 , limit // i + 1 ):
sum_divs[k * i] += k + i
lowercase_ = set()
lowercase_ = 0
for n in range(1 , limit + 1 ):
if sum_divs[n] > n:
abundants.add(UpperCAmelCase__ )
if not any((n - a in abundants) for a in abundants ):
res += n
return res
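# --- illustrative sketch (not part of the original module) ---
# The divisor-sum sieve above in isolation, checked on a known abundant
# number (12: 1 + 2 + 3 + 4 + 6 = 16 > 12). 'proper_divisor_sums' is a
# hypothetical name.
def proper_divisor_sums(limit: int) -> list:
    sums = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sums[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sums[k * i] += k + i
    return sums
# proper_divisor_sums(12)[12] -> 16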
if __name__ == "__main__":
print(solution())
| 650
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : int = 'biogpt'
def __init__( self : Union[str, Any] , UpperCamelCase__ : Any=42_384 , UpperCamelCase__ : Optional[Any]=1_024 , UpperCamelCase__ : List[Any]=24 , UpperCamelCase__ : Any=16 , UpperCamelCase__ : Optional[int]=4_096 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=1_024 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Union[str, Any]=1e-12 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Any=2 , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = scale_embedding
lowercase_ = use_cache
lowercase_ = layerdrop
lowercase_ = activation_dropout
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
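# --- illustrative usage sketch (not part of the original module) ---
# A hedged example of how a config class like this is typically used,
# assuming the `transformers` package is installed; the class is registered
# under model_type 'biogpt' and exposed there as `BioGptConfig` (values below
# are illustrative only):
# config = BioGptConfig(hidden_size=512, num_hidden_layers=12)
# assert config.hidden_size == 512
# config.save_pretrained("./biogpt-config")  # writes config.json for reuse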
| 650
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class UpperCamelCase__ :
def __init__( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : str=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=False , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=99 , UpperCamelCase__ : Dict=32 , UpperCamelCase__ : Union[str, Any]=5 , UpperCamelCase__ : Tuple=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : int=None , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , use_stable_embedding=UpperCamelCase__ , )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase_ = OpenLlamaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , ):
'''simple docstring'''
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , ):
'''simple docstring'''
lowercase_ = True
lowercase_ = True
lowercase_ = OpenLlamaForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ , )
lowercase_ = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids
lowercase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
lowercase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase_ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
lowercase_ = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )["""hidden_states"""][0]
# select random slice
lowercase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = OpenLlamaModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ = type
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = """single_label_classification"""
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowercase_ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = """multi_label_classification"""
lowercase_ = input_dict["""input_ids"""]
lowercase_ = input_ids.ne(1 ).to(UpperCamelCase__ )
lowercase_ = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowercase_ = OpenLlamaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = ids_tensor([1, 10] , config.vocab_size )
lowercase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
original_model.eval()
lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = {"""type""": scaling_type, """factor""": 10.0}
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
scaled_model.to(UpperCamelCase__ )
scaled_model.eval()
lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
| 650
| 1
|
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
lowercase_ = 4
lowercase_ = (1 << p) - 1
for _ in range(p - 2 ):
lowercase_ = ((s * s) - 2) % m
return s == 0
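# --- illustrative sketch (not part of the original module) ---
# Scanning prime exponents with the test above recovers the small Mersenne
# primes 2**p - 1 (the test is only valid for prime p, plus the p == 2 case):
# [p for p in (2, 3, 5, 7, 11, 13, 17, 19) if UpperCAmelCase_(p)]
# -> [2, 3, 5, 7, 13, 17, 19] (11 drops out: 2**11 - 1 = 2047 = 23 * 89)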
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
| 650
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
a = False
a = logging.get_logger(__name__)
a = 'ybelkada/fonts'
def UpperCAmelCase_ ( ):
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
requires_backends(UpperCAmelCase__ , ["""torch"""] )
_check_torch_version()
lowercase_ = image_tensor.unsqueeze(0 )
lowercase_ = torch.nn.functional.unfold(UpperCAmelCase__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
lowercase_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase__ , UpperCAmelCase__ , -1 )
lowercase_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
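# --- illustrative sketch (not part of the original module) ---
# What torch_extract_patches computes, reduced to its core: unfold cuts the
# image into non-overlapping patch_height x patch_width tiles and flattens
# each tile into one row. '_extract_patches_sketch' is a hypothetical helper.
def _extract_patches_sketch(image, patch_h, patch_w):
    import torch  # local import so the sketch respects the optional dependency

    cols = torch.nn.functional.unfold(
        image.unsqueeze(0), (patch_h, patch_w), stride=(patch_h, patch_w)
    )
    # cols: (1, C * patch_h * patch_w, num_patches) -> (num_patches, C * patch_h * patch_w)
    return cols.squeeze(0).transpose(0, 1)
# _extract_patches_sketch(torch.zeros(3, 16, 16), 8, 8).shape -> (4, 192)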
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ = 3_6 , UpperCAmelCase__ = "black" , UpperCAmelCase__ = "white" , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = 5 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , ):
requires_backends(UpperCAmelCase__ , """vision""" )
# Add new lines so that each line is no more than 80 characters.
lowercase_ = textwrap.TextWrapper(width=8_0 )
lowercase_ = wrapper.wrap(text=UpperCAmelCase__ )
lowercase_ = """\n""".join(UpperCAmelCase__ )
if font_bytes is not None and font_path is None:
lowercase_ = io.BytesIO(UpperCAmelCase__ )
elif font_path is not None:
lowercase_ = font_path
else:
lowercase_ = hf_hub_download(UpperCAmelCase__ , """Arial.TTF""" )
lowercase_ = ImageFont.truetype(UpperCAmelCase__ , encoding="""UTF-8""" , size=UpperCAmelCase__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
lowercase_ = ImageDraw.Draw(Image.new("""RGB""" , (1, 1) , UpperCAmelCase__ ) )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = temp_draw.textbbox((0, 0) , UpperCAmelCase__ , UpperCAmelCase__ )
# Create the actual image with a bit of padding around the text.
lowercase_ = text_width + left_padding + right_padding
lowercase_ = text_height + top_padding + bottom_padding
lowercase_ = Image.new("""RGB""" , (image_width, image_height) , UpperCAmelCase__ )
lowercase_ = ImageDraw.Draw(UpperCAmelCase__ )
draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase__ , fill=UpperCAmelCase__ , font=UpperCAmelCase__ )
return image
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(UpperCAmelCase__ , """vision""" )
# Convert to PIL image if necessary
lowercase_ = to_pil_image(UpperCAmelCase__ )
lowercase_ = render_text(UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase_ = max(header_image.width , image.width )
lowercase_ = int(image.height * (new_width / image.width) )
lowercase_ = int(header_image.height * (new_width / header_image.width) )
lowercase_ = Image.new("""RGB""" , (new_width, new_height + new_header_height) , """white""" )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
lowercase_ = to_numpy_array(UpperCAmelCase__ )
if infer_channel_dimension_format(UpperCAmelCase__ ) == ChannelDimension.LAST:
lowercase_ = to_channel_dimension_format(UpperCAmelCase__ , ChannelDimension.LAST )
return new_image
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Tuple = ['flattened_patches']
def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 2_048 , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = patch_size if patch_size is not None else {"""height""": 16, """width""": 16}
lowercase_ = do_normalize
lowercase_ = do_convert_rgb
lowercase_ = max_patches
lowercase_ = is_vqa
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : dict , **UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
requires_backends(self.extract_flattened_patches , """torch""" )
_check_torch_version()
# convert to torch
lowercase_ = to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.FIRST )
lowercase_ = torch.from_numpy(UpperCamelCase__ )
lowercase_ , lowercase_ = patch_size["""height"""], patch_size["""width"""]
lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
# maximize scale s.t. the resized image yields at most 'max_patches' patches
lowercase_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
lowercase_ = max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase__ ) , 1 )
lowercase_ = max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase__ ) , 1 )
lowercase_ = max(num_feasible_rows * patch_height , 1 )
lowercase_ = max(num_feasible_cols * patch_width , 1 )
lowercase_ = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode="""bilinear""" , align_corners=UpperCamelCase__ , antialias=UpperCamelCase__ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
lowercase_ = torch_extract_patches(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = patches.shape
lowercase_ = patches_shape[1]
lowercase_ = patches_shape[2]
lowercase_ = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
lowercase_ = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
lowercase_ = torch.arange(UpperCamelCase__ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase__ ).reshape([rows * columns, 1] )
lowercase_ = torch.arange(UpperCamelCase__ ).reshape([1, columns] ).repeat(UpperCamelCase__ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
lowercase_ = row_ids.to(torch.floataa )
lowercase_ = col_ids.to(torch.floataa )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
lowercase_ = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
lowercase_ = torch.nn.functional.pad(UpperCamelCase__ , [0, 0, 0, max_patches - (rows * columns)] ).float()
lowercase_ = to_numpy_array(UpperCamelCase__ )
return result
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict ):
'''simple docstring'''
if image.dtype == np.uinta:
lowercase_ = image.astype(np.floataa )
# take mean across the whole `image`
lowercase_ = np.mean(UpperCamelCase__ )
lowercase_ = np.std(UpperCamelCase__ )
lowercase_ = max(UpperCamelCase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase_ = patch_size if patch_size is not None else self.patch_size
lowercase_ = max_patches if max_patches is not None else self.max_patches
lowercase_ = self.is_vqa
if kwargs.get("""data_format""" , UpperCamelCase__ ) is not None:
raise ValueError("""data_format is not an accepted input as the outputs are """ )
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase_ = [convert_to_rgb(UpperCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError("""A header text must be provided for VQA models.""" )
lowercase_ = kwargs.pop("""font_bytes""" , UpperCamelCase__ )
lowercase_ = kwargs.pop("""font_path""" , UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = [header_text] * len(UpperCamelCase__ )
lowercase_ = [
render_header(UpperCamelCase__ , header_text[i] , font_bytes=UpperCamelCase__ , font_path=UpperCamelCase__ )
for i, image in enumerate(UpperCamelCase__ )
]
if do_normalize:
lowercase_ = [self.normalize(image=UpperCamelCase__ ) for image in images]
# convert to torch tensor and permute
lowercase_ = [
self.extract_flattened_patches(image=UpperCamelCase__ , max_patches=UpperCamelCase__ , patch_size=UpperCamelCase__ )
for image in images
]
# create attention mask in numpy
lowercase_ = [(image.sum(axis=-1 ) != 0).astype(np.floataa ) for image in images]
lowercase_ = BatchFeature(
data={"""flattened_patches""": images, """attention_mask""": attention_masks} , tensor_type=UpperCamelCase__ )
return encoded_outputs
| 650
| 1
|
import argparse
import os
import re
a = 'src/diffusers'
# Pattern that looks at the indentation in a line.
a = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
a = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
a = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
a = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
a = re.compile(R'\[([^\]]+)\]')
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = _re_indent.search(UpperCAmelCase__ )
return "" if search is None else search.groups()[0]
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__="" , UpperCAmelCase__=None , UpperCAmelCase__=None ):
lowercase_ = 0
lowercase_ = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(UpperCAmelCase__ ):
index += 1
lowercase_ = ["""\n""".join(lines[:index] )]
else:
lowercase_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase_ = [lines[index]]
index += 1
while index < len(UpperCAmelCase__ ) and (end_prompt is None or not lines[index].startswith(UpperCAmelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(UpperCAmelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(UpperCAmelCase__ ) )
if index < len(UpperCAmelCase__ ) - 1:
lowercase_ = [lines[index + 1]]
index += 1
else:
lowercase_ = []
else:
blocks.append("""\n""".join(UpperCAmelCase__ ) )
lowercase_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(UpperCAmelCase__ ) > 0:
blocks.append("""\n""".join(UpperCAmelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(UpperCAmelCase__ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def UpperCAmelCase_ ( UpperCAmelCase__ ):
def _inner(UpperCAmelCase__ ):
return key(UpperCAmelCase__ ).lower().replace("""_""" , """""" )
return _inner
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__=None ):
# If no key is provided, we use a noop.
def noop(x ):
return x
if key is None:
lowercase_ = noop
# Constants are all uppercase, they go first.
lowercase_ = [obj for obj in objects if key(UpperCAmelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase_ = [obj for obj in objects if key(UpperCAmelCase__ )[0].isupper() and not key(UpperCAmelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
lowercase_ = [obj for obj in objects if not key(UpperCAmelCase__ )[0].isupper()]
lowercase_ = ignore_underscore(UpperCAmelCase__ )
return sorted(UpperCAmelCase__ , key=UpperCAmelCase__ ) + sorted(UpperCAmelCase__ , key=UpperCAmelCase__ ) + sorted(UpperCAmelCase__ , key=UpperCAmelCase__ )
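# --- illustrative sketch (not part of the original script) ---
# The ordering rule above on a concrete list: constants first, then classes,
# then functions, each group sorted case/underscore-insensitively.
# 'sort_objects_sketch' is a hypothetical name.
def sort_objects_sketch(objects):
    key = lambda name: name.lower().replace("_", "")
    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)
# sort_objects_sketch(["load_model", "CONFIG_NAME", "AutoModel", "_helper"])
# -> ['CONFIG_NAME', 'AutoModel', '_helper', 'load_model']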
def UpperCAmelCase_ ( UpperCAmelCase__ ):
# This inner function sort imports between [ ].
def _replace(UpperCAmelCase__ ):
lowercase_ = match.groups()[0]
if "," not in imports:
return F'''[{imports}]'''
lowercase_ = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase_ = keys[:-1]
return "[" + ", ".join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase__ )] ) + "]"
lowercase_ = import_statement.split("""\n""" )
if len(UpperCAmelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase_ = 2 if lines[1].strip() == """[""" else 1
lowercase_ = [(i, _re_strip_line.search(UpperCAmelCase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowercase_ = sort_objects(UpperCAmelCase__ , key=lambda x : x[1] )
lowercase_ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(UpperCAmelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowercase_ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowercase_ = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowercase_ = keys[:-1]
lowercase_ = get_indent(lines[1] ) + """, """.join([F'''"{k}"''' for k in sort_objects(UpperCAmelCase__ )] )
return "\n".join(UpperCAmelCase__ )
else:
# Finally we have to deal with imports fitting on one line
lowercase_ = _re_bracket_content.sub(_replace , UpperCAmelCase__ )
return import_statement
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__=True ):
with open(UpperCAmelCase__ , """r""" ) as f:
lowercase_ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowercase_ = split_code_in_indented_blocks(
UpperCAmelCase__ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(UpperCAmelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowercase_ = main_blocks[block_idx]
lowercase_ = block.split("""\n""" )
# Get to the start of the imports.
lowercase_ = 0
while line_idx < len(UpperCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowercase_ = len(UpperCAmelCase__ )
else:
line_idx += 1
if line_idx >= len(UpperCAmelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowercase_ = """\n""".join(block_lines[line_idx:-1] )
lowercase_ = get_indent(block_lines[1] )
# Split the internal block into blocks of indent level 1.
lowercase_ = split_code_in_indented_blocks(UpperCAmelCase__ , indent_level=UpperCAmelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowercase_ = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowercase_ = [(pattern.search(UpperCAmelCase__ ).groups()[0] if pattern.search(UpperCAmelCase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowercase_ = [(i, key) for i, key in enumerate(UpperCAmelCase__ ) if key is not None]
lowercase_ = [x[0] for x in sorted(UpperCAmelCase__ , key=lambda x : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowercase_ = 0
lowercase_ = []
for i in range(len(UpperCAmelCase__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowercase_ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(UpperCAmelCase__ )
count += 1
# And we put our main block back together with its first and last line.
lowercase_ = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(UpperCAmelCase__ ):
if check_only:
return True
else:
print(F'''Overwriting {file}.''' )
with open(UpperCAmelCase__ , """w""" ) as f:
f.write("""\n""".join(UpperCAmelCase__ ) )
def sort_imports_in_all_inits(check_only=True):
    # `PATH_TO_TRANSFORMERS` is assumed to be defined near the top of this script.
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
import cv2
import numpy as np
class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")
    def __str__(self):
        return str(self.k)
    def detect(self, img_path: str):
        """Return the image with detected corners marked in red, plus the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                # Sum the gradient products over the local window.
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # Use the k validated in the constructor (the original hard-coded 0.04 here).
                r = det - self.k * (trace**2)
                # Threshold on the corner response; this value can be tuned.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list
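# Note: the response computed above is the Harris measure R = det(M) - k * trace(M)**2,
# where M = [[wxx, wxy], [wxy, wyy]] is the structure tensor summed over the window.
# Large positive R marks a corner, large negative R an edge, and |R| near 0 a flat region.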
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01
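# e.g. is_1percent_close(2360000, 2351563) is True: the relative error is ~0.36%.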
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    # `dataset_loading_script_dir` is a pytest fixture pointing at a dataset loading script.
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            # The original left this comparison as a no-op; make it an actual check.
            assert result == expected
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image tensor (values in [-1, 1]) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images
def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
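# Example (illustrative): converting a batch of diffusion outputs in [-1, 1] to PIL,
# assuming `sample` is a torch tensor of shape (batch, channels, height, width):
#     pil_images = pt_to_pil(sample)
#     pil_images[0].save("out.png")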
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
'b0': {
'hidden_dim': 1_2_8_0,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_2_4,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1_2_8_0,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_4_0,
'dropout_rate': 0.2,
'dw_padding': [1_6],
},
'b2': {
'hidden_dim': 1_4_0_8,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_6_0,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 1_6],
},
'b3': {
'hidden_dim': 1_5_3_6,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_0_0,
'dropout_rate': 0.3,
'dw_padding': [5, 1_8],
},
'b4': {
'hidden_dim': 1_7_9_2,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_8_0,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2_0_4_8,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_5_6,
'dropout_rate': 0.4,
'dw_padding': [1_3, 2_7],
},
'b6': {
'hidden_dim': 2_3_0_4,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_2_8,
'dropout_rate': 0.5,
'dw_padding': [3_1],
},
'b7': {
'hidden_dim': 2_5_6_0,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_0_0,
'dropout_rate': 0.5,
'dw_padding': [1_8],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False, )
    return preprocessor
def rename_keys(original_param_names):
    # Map TF block prefixes ("1a", "2b", ...) to consecutive HF block indices.
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
lowercase_ = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    # The classification head lives outside the `efficientnet.` backbone prefix.
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
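# Example (illustrative): for b = "1a" mapped to hf_b = "0", rename_keys produces
# entries such as
#     ("block1a_expand_conv/kernel:0", "encoder.blocks.0.expansion.expand_conv.weight")
# which key_mapping then prefixes with "efficientnet." on the PyTorch side.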
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
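# Note: TF stores conv kernels as (height, width, in_channels, out_channels) while
# PyTorch expects (out_channels, in_channels, height, width), hence permute(3, 2, 0, 1).
# TF depthwise kernels are (height, width, channels, multiplier), which map to
# PyTorch's (channels, multiplier, height, width) via permute(2, 3, 0, 1).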
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Instantiate the original Keras model with ImageNet weights.
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax", )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
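# Example invocation (the script filename is illustrative):
#     python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model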
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5
    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3
    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3
    def test_trained_betas(self):
        pass
    def test_add_noise_device(self):
        pass
# Imports
import numpy as np
class IndexCalculation:
    """
    Calculate vegetation indices from red/green/blue/red-edge/NIR band matrices.
    """
    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Update the bands (if given) and dispatch to the requested index function."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arvi2,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False
    def arvi2(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi(self):
        return self.nir * (self.red / (self.green**2))
    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi(self, x=0.08, a=1.22, b=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green(self):
        return (self.nir / self.green) - 1
    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1
    def ci(self):
        return (self.red - self.blue) / self.red
    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
    def gdvi(self):
        return self.nir - self.green
    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )
    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)
    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i(self):
        return (self.red + self.green + self.blue) / 30.5
    def rvi(self):
        return self.nir / self.red
    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)
    def ri(self):
        return (self.red - self.green) / (self.red + self.green)
    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value
    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi(self):
        return self.nir / self.red
    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
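# Example (illustrative): computing NDVI for two pixels from numpy band matrices.
#     bands = IndexCalculation(red=np.array([[0.1, 0.2]]), nir=np.array([[0.5, 0.6]]))
#     ndvi = bands.calculation("NDVI")  # array([[0.66666667, 0.5]])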
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    image_column_name: Optional[str] = field(
        default=None, metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'})
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'})
    mask_patch_size: int = field(default=32, metadata={'help': 'The size of the square patches to use for masking.'})
    mask_ratio: float = field(
        default=0.6, metadata={'help': 'Percentage of patches to mask.'})
    max_train_samples: Optional[int] = field(
        default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(
        default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """
    model_name_or_path: str = field(
        default=None, metadata={'help': 'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a checkpoint identifier on the hub. Don\'t set if you want to train a model from scratch.'})
    model_type: Optional[str] = field(
        default=None, metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)})
    config_name_or_path: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    config_overrides: Optional[str] = field(
        default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'})
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})
    image_size: Optional[int] = field(
        default=None, metadata={'help': 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'})
    patch_size: Optional[int] = field(
        default=None, metadata={'help': 'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'})
    encoder_stride: Optional[int] = field(
        default=None, metadata={'help': 'Stride to use for the encoder.'})
class MaskGenerator:
    """
    Generates a boolean mask over the model's patch grid for masked image modeling.
    """
    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio
        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")
        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size
        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))
    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1
        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)
        return torch.tensor(mask.flatten())
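# Worked example with the defaults above: input_size=192 and mask_patch_size=32 give a
# 6x6 grid of mask patches (token_count=36); mask_ratio=0.6 masks ceil(36 * 0.6) = 22
# of them, and each mask patch is upsampled by scale = 32 // 4 = 8 so the flattened
# mask covers the (192 / 4)**2 = 2304 model patches.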
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")
    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")
    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"
    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )
    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        } )
    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ] )
    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, )
    def preprocess_images(examples):
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)
    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds["train"] if training_args.do_train else None, eval_dataset=ds["validation"] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                } ), reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"], )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : List[Any] = ['pixel_values']
def __init__( self : List[str] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = size if size is not None else {"""shortest_edge""": 224}
lowercase_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
lowercase_ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowercase_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" )
lowercase_ = do_resize
lowercase_ = size
lowercase_ = resample
lowercase_ = do_center_crop
lowercase_ = crop_size
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_normalize
lowercase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
lowercase_ = int((256 / 224) * size["""shortest_edge"""] )
lowercase_ = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ )
lowercase_ = {"""height""": output_size[0], """width""": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
UpperCamelCase__ , size=(size_dict["""height"""], size_dict["""width"""]) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
lowercase_ = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[Union[float, Iterable[float]]] = None , UpperCamelCase__ : Optional[TensorType] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = image_mean if image_mean is not None else self.image_mean
lowercase_ = image_std if image_std is not None else self.image_std
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
lowercase_ = crop_size if crop_size is not None else self.crop_size
lowercase_ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" )
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
lowercase_ = [self.resize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_center_crop:
lowercase_ = [self.center_crop(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
if do_normalize:
lowercase_ = [self.normalize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
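# Usage sketch (illustrative, not part of the class above): image processors with
# this resize -> center-crop -> rescale -> normalize pipeline are normally driven
# through `AutoImageProcessor`; the checkpoint name below is an assumption for
# demonstration only.
#
#   from PIL import Image
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("facebook/levit-128S")
#   batch = processor(images=Image.open("example.jpg"), return_tensors="pt")
#   print(batch["pixel_values"].shape)  # (1, 3, crop_height, crop_width)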
| 650
| 1
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = logging.get_logger()
# the current default level is logging.WARNING
lowercase_ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(UpperCamelCase__ )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = logging.get_verbosity()
lowercase_ = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
lowercase_ = """Testing 1, 2, 3"""
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(UpperCamelCase__ ) as cl:
logger.warning(UpperCamelCase__ )
self.assertEqual(cl.out , msg + """\n""" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(UpperCamelCase__ ) as cl:
logger.warning(UpperCamelCase__ )
self.assertEqual(cl.out , """""" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(UpperCamelCase__ ) as cl:
logger.warning(UpperCamelCase__ )
self.assertEqual(cl.out , msg + """\n""" )
# restore to the original level
logging.set_verbosity(UpperCamelCase__ )
@mockenv(TRANSFORMERS_VERBOSITY="""error""" )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
lowercase_ = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
lowercase_ = os.getenv("""TRANSFORMERS_VERBOSITY""" , UpperCamelCase__ )
lowercase_ = logging.log_levels[env_level_str]
lowercase_ = logging.get_verbosity()
self.assertEqual(
UpperCamelCase__ , UpperCamelCase__ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
# restore to the original level
lowercase_ = """"""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="""super-error""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
lowercase_ = logging.logging.getLogger()
with CaptureLogger(UpperCamelCase__ ) as cl:
# this action activates the env var
logging.get_logger("""transformers.models.bart.tokenization_bart""" )
self.assertIn("""Unknown option TRANSFORMERS_VERBOSITY=super-error""" , cl.out )
# no need to restore as nothing was changed
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
lowercase_ = logging.get_logger("""transformers.models.bart.tokenization_bart""" )
lowercase_ = """Testing 1, 2, 3"""
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""1""" ):
# nothing should be logged as env var disables this method
with CaptureLogger(UpperCamelCase__ ) as cl:
logger.warning_advice(UpperCamelCase__ )
self.assertEqual(cl.out , """""" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="""""" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(UpperCamelCase__ ) as cl:
logger.warning_advice(UpperCamelCase__ )
self.assertEqual(cl.out , msg + """\n""" )
def UpperCAmelCase_ ( ):
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
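# Usage sketch (separate from the tests above): the verbosity helpers exercised
# here are the public `transformers` logging API and can be used directly.
if __name__ == "__main__":
    logging.set_verbosity_info()
    logging.get_logger("transformers").info("info-level messages are now visible")
    logging.set_verbosity_warning()  # back to the library default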
| 650
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
a = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
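# Note on the pattern above: replacing the entry in `sys.modules` with a
# `_LazyModule` keeps a plain `import` cheap; `TrOCRForCausalLM` and friends (and
# torch itself) are only imported on first attribute access.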
| 650
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
lowercase_ = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , x.transpose() ) )
lowercase_ = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
lowercase_ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , transpose(UpperCamelCase__ ).numpy() ) )
lowercase_ = np.random.randn(3 , 4 , 5 )
lowercase_ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , transpose(UpperCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
lowercase_ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , transpose(UpperCamelCase__ ).numpy() ) )
lowercase_ = np.random.randn(3 , 4 , 5 )
lowercase_ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , transpose(UpperCamelCase__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
lowercase_ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ ) , np.asarray(transpose(UpperCamelCase__ ) ) ) )
lowercase_ = np.random.randn(3 , 4 , 5 )
lowercase_ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) , np.asarray(transpose(UpperCamelCase__ , axes=(1, 2, 0) ) ) ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , np.reshape(UpperCamelCase__ , (4, 3) ) ) )
lowercase_ = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , np.reshape(UpperCamelCase__ , (12, 5) ) ) )
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
lowercase_ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , reshape(UpperCamelCase__ , (4, 3) ).numpy() ) )
lowercase_ = np.random.randn(3 , 4 , 5 )
lowercase_ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , reshape(UpperCamelCase__ , (12, 5) ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
lowercase_ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , reshape(UpperCamelCase__ , (4, 3) ).numpy() ) )
lowercase_ = np.random.randn(3 , 4 , 5 )
lowercase_ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , reshape(UpperCamelCase__ , (12, 5) ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
lowercase_ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (4, 3) ) , np.asarray(reshape(UpperCamelCase__ , (4, 3) ) ) ) )
lowercase_ = np.random.randn(3 , 4 , 5 )
lowercase_ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(reshape(UpperCamelCase__ , (12, 5) ) , np.asarray(reshape(UpperCamelCase__ , (12, 5) ) ) ) )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , np.squeeze(UpperCamelCase__ ) ) )
lowercase_ = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , np.squeeze(UpperCamelCase__ , axis=2 ) ) )
@require_torch
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = np.random.randn(1 , 3 , 4 )
lowercase_ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , squeeze(UpperCamelCase__ ).numpy() ) )
lowercase_ = np.random.randn(1 , 4 , 1 , 5 )
lowercase_ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , squeeze(UpperCamelCase__ , axis=2 ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = np.random.randn(1 , 3 , 4 )
lowercase_ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , squeeze(UpperCamelCase__ ).numpy() ) )
lowercase_ = np.random.randn(1 , 4 , 1 , 5 )
lowercase_ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , squeeze(UpperCamelCase__ , axis=2 ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = np.random.randn(1 , 3 , 4 )
lowercase_ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ ) , np.asarray(squeeze(UpperCamelCase__ ) ) ) )
lowercase_ = np.random.randn(1 , 4 , 1 , 5 )
lowercase_ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(squeeze(UpperCamelCase__ , axis=2 ) , np.asarray(squeeze(UpperCamelCase__ , axis=2 ) ) ) )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , np.expand_dims(UpperCamelCase__ , axis=1 ) ) )
@require_torch
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
lowercase_ = torch.tensor(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , expand_dims(UpperCamelCase__ , axis=1 ).numpy() ) )
@require_tf
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
lowercase_ = tf.constant(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , expand_dims(UpperCamelCase__ , axis=1 ).numpy() ) )
@require_flax
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = np.random.randn(3 , 4 )
lowercase_ = jnp.array(UpperCamelCase__ )
self.assertTrue(np.allclose(expand_dims(UpperCamelCase__ , axis=1 ) , np.asarray(expand_dims(UpperCamelCase__ , axis=1 ) ) ) )
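# Usage sketch: the generic helpers under test dispatch on the input's framework,
# so the same call works for NumPy arrays and, when installed, torch/tf/jax tensors.
if __name__ == "__main__":
    x = np.random.randn(3, 4)
    print(transpose(x).shape)            # (4, 3)
    print(reshape(x, (2, 6)).shape)      # (2, 6)
    print(expand_dims(x, axis=0).shape)  # (1, 3, 4)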
| 650
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
a = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
for attribute in key.split(""".""" ):
lowercase_ = getattr(UpperCAmelCase__ , UpperCAmelCase__ )
if weight_type is not None:
lowercase_ = getattr(UpperCAmelCase__ , UpperCAmelCase__ ).shape
else:
lowercase_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase_ = value
elif weight_type == "weight_g":
lowercase_ = value
elif weight_type == "weight_v":
lowercase_ = value
elif weight_type == "bias":
lowercase_ = value
else:
lowercase_ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = []
lowercase_ = fairseq_model.state_dict()
lowercase_ = hf_model.feature_extractor
    # if the encoder has a different dim than the decoder -> use proj_weight
lowercase_ = None
for name, value in fairseq_dict.items():
lowercase_ = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , hf_model.config.feat_extract_norm == """group""" , )
lowercase_ = True
elif name.split(""".""" )[0] == "proj":
lowercase_ = fairseq_model.proj
lowercase_ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowercase_ = True
if "*" in mapped_key:
lowercase_ = name.split(UpperCAmelCase__ )[0].split(""".""" )[-2]
lowercase_ = mapped_key.replace("""*""" , UpperCAmelCase__ )
if "weight_g" in name:
lowercase_ = """weight_g"""
elif "weight_v" in name:
lowercase_ = """weight_v"""
elif "bias" in name:
lowercase_ = """bias"""
elif "weight" in name:
lowercase_ = """weight"""
else:
lowercase_ = None
set_recursively(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = full_name.split("""conv_layers.""" )[-1]
lowercase_ = name.split(""".""" )
lowercase_ = int(items[0] )
lowercase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase_ = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ , lowercase_ = emb.weight.shape
lowercase_ = nn.Linear(UpperCAmelCase__ , UpperCAmelCase__ , bias=UpperCAmelCase__ )
lowercase_ = emb.weight.data
return lin_layer
def UpperCAmelCase_ ( UpperCAmelCase__ ):
with open(UpperCAmelCase__ , """r""" , encoding="""utf-8""" ) as f:
lowercase_ = f.readlines()
lowercase_ = [line.split(""" """ )[0] for line in lines]
lowercase_ = len(UpperCAmelCase__ )
lowercase_ = {
"""<s>""": 0,
"""<pad>""": 1,
"""</s>""": 2,
"""<unk>""": 3,
}
vocab_dict.update(dict(zip(UpperCAmelCase__ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ):
lowercase_ = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
lowercase_ = SpeechaTextaConfig.from_pretrained(
UpperCAmelCase__ , vocab_size=UpperCAmelCase__ , decoder_layers=UpperCAmelCase__ , do_stable_layer_norm=UpperCAmelCase__ )
lowercase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , )
lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
lowercase_ = model[0].eval()
# set weights for wav2vec2 encoder
lowercase_ = WavaVecaModel(UpperCAmelCase__ )
lowercase_ = recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase__ )
lowercase_ = SpeechaTextaForCausalLM(UpperCAmelCase__ )
lowercase_ , lowercase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
lowercase_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowercase_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
lowercase_ = False
# add projection layer
lowercase_ = nn.Parameter(projection_layer.weight )
lowercase_ = nn.Parameter(projection_layer.bias )
lowercase_ = create_vocab_dict(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase__ , """vocab.json""" ) )
tokenizer.save_pretrained(UpperCAmelCase__ )
lowercase_ = hf_wavavec.config.to_dict()
lowercase_ = tokenizer.pad_token_id
lowercase_ = tokenizer.bos_token_id
lowercase_ = tokenizer.eos_token_id
lowercase_ = """speech_to_text_2"""
lowercase_ = """wav2vec2"""
lowercase_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__ )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
feature_extractor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
a = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
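# Example invocation (script name and all paths are illustrative placeholders):
#   python convert_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./converted-model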
| 650
| 1
|
from __future__ import annotations
class UpperCamelCase__ :
def __init__( self : Dict , UpperCamelCase__ : List[Any]=None ):
'''simple docstring'''
lowercase_ = data
lowercase_ = None
def __repr__( self : Any ):
'''simple docstring'''
lowercase_ = []
lowercase_ = self
while temp:
string_rep.append(F'''{temp.data}''' )
lowercase_ = temp.next
return "->".join(UpperCamelCase__ )
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if not elements_list:
raise Exception("""The Elements List is empty""" )
lowercase_ = lowercase_ = Node(elements_list[0] )
for i in range(1 , len(UpperCAmelCase__ ) ):
lowercase_ = Node(elements_list[i] )
lowercase_ = current.next
return head
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if head_node is not None and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
print_reverse(head_node.next )
print(head_node.data )
def UpperCAmelCase_ ( ):
from doctest import testmod
testmod()
lowercase_ = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] )
print("""Linked List:""" )
print(UpperCAmelCase__ )
print("""Elements in Reverse:""" )
print_reverse(UpperCAmelCase__ )
if __name__ == "__main__":
main()
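# Note: `print_reverse` recurses once per node, so very long lists can exceed
# Python's default recursion limit (roughly 1000 frames); an iterative variant
# (collect the nodes, then print in reverse) would sidestep that.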
| 650
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
# TODO Update this
a = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Tuple = 'esm'
def __init__( self : Optional[Any] , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : Dict=3_072 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[int]=1_026 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Dict=1e-12 , UpperCamelCase__ : List[str]="absolute" , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Dict=None , UpperCamelCase__ : str=None , **UpperCamelCase__ : str , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , mask_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = position_embedding_type
lowercase_ = use_cache
lowercase_ = emb_layer_norm_before
lowercase_ = token_dropout
lowercase_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
lowercase_ = EsmFoldConfig()
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = EsmFoldConfig(**UpperCamelCase__ )
lowercase_ = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
lowercase_ = get_default_vocab_list()
else:
lowercase_ = vocab_list
else:
lowercase_ = None
lowercase_ = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , UpperCamelCase__ ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = super().to_dict()
if isinstance(self.esmfold_config , UpperCamelCase__ ):
lowercase_ = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : str = None
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : "TrunkConfig" = None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if self.trunk is None:
lowercase_ = TrunkConfig()
elif isinstance(self.trunk , UpperCamelCase__ ):
lowercase_ = TrunkConfig(**self.trunk )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = asdict(self )
lowercase_ = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : int = 48
__SCREAMING_SNAKE_CASE : int = 1024
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : int = 32
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : float = 0
__SCREAMING_SNAKE_CASE : bool = False
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : Optional[int] = 128
__SCREAMING_SNAKE_CASE : "StructureModuleConfig" = None
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
if self.structure_module is None:
lowercase_ = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCamelCase__ ):
lowercase_ = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                """`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
                F''' {self.sequence_state_dim} and {self.sequence_head_width}.''' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                """`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
                F''' {self.pairwise_state_dim} and {self.pairwise_head_width}.''' )
lowercase_ = self.sequence_state_dim // self.sequence_head_width
lowercase_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = asdict(self )
lowercase_ = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : int = 384
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 16
__SCREAMING_SNAKE_CASE : int = 128
__SCREAMING_SNAKE_CASE : int = 12
__SCREAMING_SNAKE_CASE : int = 4
__SCREAMING_SNAKE_CASE : int = 8
__SCREAMING_SNAKE_CASE : float = 0.1
__SCREAMING_SNAKE_CASE : int = 8
__SCREAMING_SNAKE_CASE : int = 1
__SCREAMING_SNAKE_CASE : int = 2
__SCREAMING_SNAKE_CASE : int = 7
__SCREAMING_SNAKE_CASE : int = 10
__SCREAMING_SNAKE_CASE : float = 1e-8
__SCREAMING_SNAKE_CASE : float = 1e5
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return asdict(self )
def UpperCAmelCase_ ( ):
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
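# Usage sketch: build a token -> id mapping from the default-vocabulary helper
# defined just above (its mangled name in this file is `UpperCAmelCase_`).
if __name__ == "__main__":
    token_to_id = {tok: i for i, tok in enumerate(UpperCAmelCase_() )}
    print(token_to_id["<mask>"], token_to_id["<pad>"])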
| 650
| 1
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCamelCase__ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : int , UpperCamelCase__ : float , UpperCamelCase__ : Callable , UpperCamelCase__ : int , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : str = None , ):
'''simple docstring'''
super().__init__()
lowercase_ = initial_learning_rate
lowercase_ = warmup_steps
lowercase_ = power
lowercase_ = decay_schedule_fn
lowercase_ = name
def __call__( self : Dict , UpperCamelCase__ : str ):
'''simple docstring'''
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowercase_ = tf.cast(UpperCamelCase__ , tf.floataa )
lowercase_ = tf.cast(self.warmup_steps , tf.floataa )
lowercase_ = global_step_float / warmup_steps_float
lowercase_ = self.initial_learning_rate * tf.math.pow(UpperCamelCase__ , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCamelCase__ , )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = 0.0 , UpperCAmelCase__ = 0.9 , UpperCAmelCase__ = 0.999 , UpperCAmelCase__ = 1e-8 , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = 0.0 , UpperCAmelCase__ = 1.0 , UpperCAmelCase__ = None , ):
lowercase_ = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=UpperCAmelCase__ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase__ , )
if num_warmup_steps:
lowercase_ = WarmUp(
initial_learning_rate=UpperCAmelCase__ , decay_schedule_fn=UpperCAmelCase__ , warmup_steps=UpperCAmelCase__ , )
if weight_decay_rate > 0.0:
lowercase_ = AdamWeightDecay(
            learning_rate=UpperCAmelCase__ , weight_decay_rate=UpperCAmelCase__ , beta_1=UpperCAmelCase__ , beta_2=UpperCAmelCase__ , epsilon=UpperCAmelCase__ , clipnorm=UpperCAmelCase__ , global_clipnorm=UpperCAmelCase__ , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=UpperCAmelCase__ , )
else:
lowercase_ = tf.keras.optimizers.Adam(
            learning_rate=UpperCAmelCase__ , beta_1=UpperCAmelCase__ , beta_2=UpperCAmelCase__ , epsilon=UpperCAmelCase__ , clipnorm=UpperCAmelCase__ , global_clipnorm=UpperCAmelCase__ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : Union[str, Any] , UpperCamelCase__ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCamelCase__ : float = 0.9 , UpperCamelCase__ : float = 0.999 , UpperCamelCase__ : float = 1e-7 , UpperCamelCase__ : bool = False , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : Optional[List[str]] = None , UpperCamelCase__ : str = "AdamWeightDecay" , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ = weight_decay_rate
lowercase_ = include_in_weight_decay
lowercase_ = exclude_from_weight_decay
@classmethod
def UpperCAmelCase__ ( cls : Tuple , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase_ = {"""WarmUp""": WarmUp}
return super(UpperCamelCase__ , cls ).from_config(UpperCamelCase__ , custom_objects=UpperCamelCase__ )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
super(UpperCamelCase__ , self )._prepare_local(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
lowercase_ = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : int=None , **UpperCamelCase__ : Any ):
'''simple docstring'''
lowercase_ , lowercase_ = list(zip(*UpperCamelCase__ ) )
return super(UpperCamelCase__ , self ).apply_gradients(zip(UpperCamelCase__ , UpperCamelCase__ ) , name=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : str ):
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowercase_ = apply_state or {}
lowercase_ = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowercase_ = self._fallback_apply_state(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str=None ):
'''simple docstring'''
lowercase_ , lowercase_ = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase__ )
lowercase_ = self._decay_weights_op(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase__ , self )._resource_apply_dense(UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int=None ):
'''simple docstring'''
lowercase_ , lowercase_ = self._get_lr(var.device , var.dtype.base_dtype , UpperCamelCase__ )
lowercase_ = self._decay_weights_op(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
with tf.control_dependencies([decay] ):
return super(UpperCamelCase__ , self )._resource_apply_sparse(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[str] ):
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCamelCase__ , UpperCamelCase__ ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCamelCase__ , UpperCamelCase__ ) is not None:
return False
return True
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = []
lowercase_ = None
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
if self._accum_steps is None:
lowercase_ = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCamelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : Optional[int] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if not self._gradients:
lowercase_ = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCamelCase__ ) , trainable=UpperCamelCase__ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCamelCase__ ) != len(self._gradients ):
raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(UpperCamelCase__ )}''' )
for accum_gradient, gradient in zip(self._gradients , UpperCamelCase__ ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCamelCase__ )
self._accum_steps.assign_add(1 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCamelCase__ ) )
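# Usage sketch: these classes mirror `transformers.optimization_tf`, which exposes
# them under their original names; the call below goes through that public API
# rather than the mangled definitions above.
if __name__ == "__main__":
    from transformers.optimization_tf import create_optimizer

    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5, num_train_steps=1_000, num_warmup_steps=100, weight_decay_rate=0.01
    )
    print(float(lr_schedule(0) ), float(lr_schedule(100) ))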
| 650
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def UpperCAmelCase_ ( UpperCAmelCase__=None ):
if subparsers is not None:
lowercase_ = subparsers.add_parser("""env""" )
else:
lowercase_ = argparse.ArgumentParser("""Accelerate env command""" )
parser.add_argument(
"""--config_file""" , default=UpperCAmelCase__ , help="""The config file to use for the default values in the launching script.""" )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase__ )
return parser
def UpperCAmelCase_ ( UpperCAmelCase__ ):
lowercase_ = torch.__version__
lowercase_ = torch.cuda.is_available()
lowercase_ = is_xpu_available()
lowercase_ = is_npu_available()
lowercase_ = """Not found"""
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCAmelCase__ ):
lowercase_ = load_config_from_file(args.config_file ).to_dict()
lowercase_ = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""PyTorch XPU available""": str(UpperCAmelCase__ ),
"""PyTorch NPU available""": str(UpperCAmelCase__ ),
"""System RAM""": F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
}
if pt_cuda_available:
lowercase_ = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
lowercase_ = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
else F'''\t{accelerate_config}'''
)
print(UpperCAmelCase__ )
lowercase_ = accelerate_config
return info
def UpperCAmelCase_ ( ):
lowercase_ = env_command_parser()
lowercase_ = parser.parse_args()
env_command(UpperCAmelCase__ )
return 0
if __name__ == "__main__":
raise SystemExit(main())
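# Example invocation: `accelerate env` prints the report assembled above, ready to
# paste into a GitHub issue; `accelerate env --config_file path/to/config.yaml`
# reports against a specific config file instead of the default one.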
| 650
| 1
|
import cva
import numpy as np
class UpperCamelCase__ :
def __init__( self : List[str] , UpperCamelCase__ : float , UpperCamelCase__ : int ):
'''simple docstring'''
if k in (0.04, 0.06):
lowercase_ = k
lowercase_ = window_size
else:
raise ValueError("""invalid k value""" )
def __str__( self : Optional[int] ):
'''simple docstring'''
return str(self.k )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = cva.imread(UpperCamelCase__ , 0 )
lowercase_ , lowercase_ = img.shape
lowercase_ = []
lowercase_ = img.copy()
lowercase_ = cva.cvtColor(UpperCamelCase__ , cva.COLOR_GRAY2RGB )
lowercase_ , lowercase_ = np.gradient(UpperCamelCase__ )
lowercase_ = dx**2
lowercase_ = dy**2
lowercase_ = dx * dy
lowercase_ = 0.04
lowercase_ = self.window_size // 2
for y in range(UpperCamelCase__ , h - offset ):
for x in range(UpperCamelCase__ , w - offset ):
lowercase_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
lowercase_ = (wxx * wyy) - (wxy**2)
lowercase_ = wxx + wyy
lowercase_ = det - k * (trace**2)
                # threshold on the corner response (tunable)
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img , corner_list = edge_detect.detect('path_to_image')
    cva.imwrite('detect.png', color_img)
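# Note: the response computed in `detect` is the standard Harris measure
# R = det(M) - k * trace(M)**2 over a windowed structure tensor M; the 0.5
# threshold and k = 0.04 are tuning choices rather than fixed constants.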
| 650
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=2 , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 2
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = DeiTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = DeiTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
        lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
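# Note: the tester above follows the common HF test layout, separating config and
# input fabrication (this class) from the mixin-driven checks in the class below.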
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = DeiTModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ = False
lowercase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCamelCase__ ),
*get_values(UpperCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase_ = problem_type["""title"""]
lowercase_ = problem_type["""num_labels"""]
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if problem_type["num_labels"] > 1:
lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list:
lowercase_ = model(**UpperCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
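        # Note on the problem-type loop above (illustrative, based on how the
        # Transformers classification heads select a loss from `config.problem_type`):
        # float labels with num_labels == 2 exercise the multi-label (BCE) path,
        # long labels the single-label (cross-entropy) path, and float labels with
        # num_labels == 1 the regression (MSE) path, where silent broadcasting is
        # exactly the failure mode the warning filter guards against.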
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase_ ( ):
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
UpperCamelCase__ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCamelCase__ )
# verify the logits
lowercase_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowercase_ = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ )
| 650
| 1
|
from __future__ import annotations
def p_series(nth_term, power):
    """Return the first `nth_term` terms of the P-series 1 + 1/2^p + 1/3^p + ..."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
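# Illustrative example (not part of the original script): p_series(5, 2) is
# expected to return ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"], the first
# five terms with p = 2; the leading term is rendered as "1" because `series`
# is still empty on the first iteration.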
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input('Enter the last number (nth term) of the P-Series'))
    power = int(input('Enter the power for P-Series'))
    print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
    print(p_series(nth_term, power))
| 650
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 650
| 1
|
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class UpperCamelCase__ :
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Node | None , ):
'''simple docstring'''
lowercase_ = pos_x
lowercase_ = pos_y
lowercase_ = (pos_y, pos_x)
lowercase_ = goal_x
lowercase_ = goal_y
lowercase_ = g_cost
lowercase_ = parent
lowercase_ = self.calculate_heuristic()
lowercase_ = self.g_cost + self.h_cost
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = self.pos_x - self.goal_x
lowercase_ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(UpperCamelCase__ ) + abs(UpperCamelCase__ )
else:
return sqrt(dy**2 + dx**2 )
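    # Illustrative heuristic values: for a node at (0, 0) with goal (6, 6),
    # Manhattan (HEURISTIC == 1) gives |dx| + |dy| = 12 while Euclidean gives
    # sqrt(72) ~= 8.49; on a 4-connected grid with unit step costs both are
    # admissible, and the larger Manhattan estimate typically expands fewer nodes.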
def __lt__( self : List[str] , UpperCamelCase__ : Node ):
'''simple docstring'''
return self.f_cost < other.f_cost
class UpperCamelCase__ :
def __init__( self : Optional[Any] , UpperCamelCase__ : TPosition , UpperCamelCase__ : TPosition ):
'''simple docstring'''
lowercase_ = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase__ )
lowercase_ = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , UpperCamelCase__ )
lowercase_ = [self.start]
lowercase_ = []
lowercase_ = False
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowercase_ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(UpperCamelCase__ )
self.closed_nodes.append(UpperCamelCase__ )
lowercase_ = self.get_successors(UpperCamelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCamelCase__ )
else:
# retrieve the best current path
lowercase_ = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCamelCase__ )
else:
self.open_nodes.append(UpperCamelCase__ )
return [self.start.pos]
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : Node ):
'''simple docstring'''
lowercase_ = []
for action in delta:
lowercase_ = parent.pos_x + action[1]
lowercase_ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCamelCase__ , UpperCamelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase__ , ) )
return successors
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : Node | None ):
'''simple docstring'''
lowercase_ = node
lowercase_ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowercase_ = current_node.parent
path.reverse()
return path
class UpperCamelCase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : TPosition , UpperCamelCase__ : TPosition ):
'''simple docstring'''
lowercase_ = AStar(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = AStar(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = False
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowercase_ = self.fwd_astar.open_nodes.pop(0 )
lowercase_ = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
UpperCamelCase__ , UpperCamelCase__ )
self.fwd_astar.closed_nodes.append(UpperCamelCase__ )
self.bwd_astar.closed_nodes.append(UpperCamelCase__ )
lowercase_ = current_bwd_node
lowercase_ = current_fwd_node
lowercase_ = {
self.fwd_astar: self.fwd_astar.get_successors(UpperCamelCase__ ),
self.bwd_astar: self.bwd_astar.get_successors(UpperCamelCase__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(UpperCamelCase__ )
else:
# retrieve the best current path
lowercase_ = astar.open_nodes.pop(
astar.open_nodes.index(UpperCamelCase__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(UpperCamelCase__ )
else:
astar.open_nodes.append(UpperCamelCase__ )
return [self.fwd_astar.start.pos]
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Node , UpperCamelCase__ : Node ):
'''simple docstring'''
lowercase_ = self.fwd_astar.retrace_path(UpperCamelCase__ )
lowercase_ = self.bwd_astar.retrace_path(UpperCamelCase__ )
bwd_path.pop()
bwd_path.reverse()
lowercase_ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bd_astar = BidirectionalAStar(init, goal)
    bd_path = bd_astar.search()  # run the search so the timing below is meaningful
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 650
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : int = True
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 1_008 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowercase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase__ , f.name )
lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ )
lowercase_ = pickle.dumps(UpperCamelCase__ )
pickle.loads(UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(UpperCamelCase__ )
lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """Hello World!"""
lowercase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False
        )
| 650
| 1
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 650
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
        't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
        't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
        't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
        't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
    },
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    't5-small': 512,
    't5-base': 512,
    't5-large': 512,
    't5-3b': 512,
    't5-11b': 512,
}
class TaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = TaTokenizer

    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
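    # Illustrative behavior of the helpers above (assuming the usual T5
    # vocabulary where eos_token_id == 1 and `prefix_tokens` is empty):
    # build_inputs_with_special_tokens([5, 6], [7]) -> [5, 6, 1, 7, 1], and
    # create_token_type_ids_from_sequences([5, 6], [7]) -> [0, 0, 0, 0, 0],
    # since T5 does not use token type ids.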
| 650
| 1
|
from collections.abc import Sequence
from queue import Queue
class UpperCamelCase__ :
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None ):
'''simple docstring'''
lowercase_ = start
lowercase_ = end
lowercase_ = val
lowercase_ = (start + end) // 2
lowercase_ = left
lowercase_ = right
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class UpperCamelCase__ :
def __init__( self : str , UpperCamelCase__ : Sequence , UpperCamelCase__ : Any ):
'''simple docstring'''
lowercase_ = collection
lowercase_ = function
if self.collection:
lowercase_ = self._build_tree(0 , len(UpperCamelCase__ ) - 1 )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
'''simple docstring'''
self._update_tree(self.root , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
return self._query_range(self.root , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
if start == end:
return SegmentTreeNode(UpperCamelCase__ , UpperCamelCase__ , self.collection[start] )
lowercase_ = (start + end) // 2
lowercase_ = self._build_tree(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = self._build_tree(mid + 1 , UpperCamelCase__ )
return SegmentTreeNode(UpperCamelCase__ , UpperCamelCase__ , self.fn(left.val , right.val ) , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ):
'''simple docstring'''
if node.start == i and node.end == i:
lowercase_ = val
return
if i <= node.mid:
self._update_tree(node.left , UpperCamelCase__ , UpperCamelCase__ )
else:
self._update_tree(node.right , UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = self.fn(node.left.val , node.right.val )
def UpperCAmelCase__ ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , UpperCamelCase__ , UpperCamelCase__ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , UpperCamelCase__ , node.mid ) , self._query_range(node.right , node.mid + 1 , UpperCamelCase__ ) , )
else:
# range in right child tree
return self._query_range(node.right , UpperCamelCase__ , UpperCamelCase__ )
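    # Worked example (using the demo at the bottom of this file): after
    # update(1, 5) the underlying array is [2, 5, 5, 3, 4]. query_range(1, 3)
    # reaches the root [0, 4] with mid == 2, so it splits into
    # _query_range(left, 1, 2) + _query_range(right, 3, 3) = (5 + 5) + 3 = 13
    # when fn is operator.add.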
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
if self.root is not None:
lowercase_ = Queue()
queue.put(self.root )
while not queue.empty():
lowercase_ = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 5_0)
a = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 650
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase_ = raw_image
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 650
| 1
|
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
a = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
a = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
a = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
return float((preds == labels).mean() )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = simple_accuracy(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ = float(fa_score(y_true=UpperCAmelCase__ , y_pred=UpperCAmelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = np.array(UpperCAmelCase__ )
lowercase_ = np.array(UpperCAmelCase__ )
lowercase_ = en_sentvecs.shape[0]
# mean centering
lowercase_ = en_sentvecs - np.mean(UpperCAmelCase__ , axis=0 )
lowercase_ = in_sentvecs - np.mean(UpperCAmelCase__ , axis=0 )
lowercase_ = cdist(UpperCAmelCase__ , UpperCAmelCase__ , """cosine""" )
lowercase_ = np.array(range(UpperCAmelCase__ ) )
lowercase_ = sim.argsort(axis=1 )[:, :1_0]
lowercase_ = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
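# Reading the retrieval metric above (illustrative): row i of `sim` holds the
# cosine distances from the i-th English sentence vector to every candidate,
# argsort keeps the 10 nearest candidates, and the returned score is the
# fraction of rows whose true index i lands among those 10, i.e. precision@10.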
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ ( datasets.Metric ):
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
'''simple docstring'''
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(UpperCamelCase__ , UpperCamelCase__ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(UpperCamelCase__ , UpperCamelCase__ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(UpperCamelCase__ , UpperCamelCase__ )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 650
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : str = ['pixel_values']
def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_pad
lowercase_ = pad_size
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)

        # compute symmetric padding amounts so each side becomes a multiple of `size`
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
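    # Illustrative padding arithmetic (not from the original file): with
    # size == 8, a 100-pixel side is padded to 104, since (100 // 8 + 1) * 8
    # == 104; a side that is already a multiple of 8 (e.g. 96) still gains a
    # full extra block and ends up at 104 as well.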
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_pad if do_pad is not None else self.do_pad
lowercase_ = pad_size if pad_size is not None else self.pad_size
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_pad:
lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 650
| 1
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_tax_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
def convert_pixastruct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = PixaStructForConditionalGeneration(config)

    converted_state_dict = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(converted_state_dict)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Convert a VQA (visual question answering) checkpoint.')
a = parser.parse_args()
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
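# Example CLI invocation (hypothetical paths and script name):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base
# Add --use_large for the large variant and --is_vqa for VQA checkpoints.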
| 650
|
def get_highest_set_bit_position(number: int) -> int:
    """
    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 1
|
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Returns the smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
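# Sanity checks (well-known Project Euler #5 values): solution(10) == 2520 and
# solution(20) == 232792560, the smallest positive numbers evenly divisible by
# every integer from 1 to 10 and 1 to 20 respectively.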
if __name__ == "__main__":
print(F'''{solution() = }''')
| 650
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
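# T5-style encoder over discrete note tokens; this appears to be the notes encoder used by
# diffusers' spectrogram-diffusion pipeline (ModelMixin + ConfigMixin + ModuleUtilsMixin
# provide weight loading, config registration and the extended-attention-mask helper).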
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float, num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str, is_decoder: bool = False):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 650
| 1
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
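# Builds a tiny random DistilBert config plus matching inputs/labels so the shared
# ModelTesterMixin suite below can exercise every task head cheaply.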
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
    pipeline_model_mapping = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 650
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
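# LRU cache backed by a deque (recency order, most recently used key at the left) plus a
# set for O(1) membership checks; the oldest key is evicted from the right when full.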
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__(self, n: int):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n
    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)
    def display(self) -> None:
        for k in self.dq_store:
            print(k)
    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 650
| 1
|
from typing import Any
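# Singly linked list: Node holds one value and a pointer to the next node; LinkedList
# keeps only the head and walks the chain for indexing, insertion and deletion.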
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None
    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None
    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__(self) -> int:
        return sum(1 for _ in self)
    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])
    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None
    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)
    def insert_head(self, data: Any):
        self.insert_nth(0, data)
    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list(self):  # print every node data
        print(self)
    def delete_head(self):
        return self.delete_nth(0)
    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)
    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty(self) -> bool:
        return self.head is None
    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
if __name__ == "__main__":
main()
| 650
|
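# Returns the words of the input string in reverse order, e.g. "hello world" -> "world hello".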
def reverse_words(input_str: str) -> str:
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 1
|
from __future__ import annotations
import csv
import requests
from bs4 import BeautifulSoup
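# Scrapes the IMDb Top 250 chart: movie titles live in the "titleColumn" cells and
# ratings in the "ratingColumn imdbRating" cells of the chart table.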
def UpperCAmelCase_ ( UpperCAmelCase__ = "" ):
lowercase_ = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
lowercase_ = BeautifulSoup(requests.get(UpperCAmelCase__ ).text , """html.parser""" )
lowercase_ = soup.find_all("""td""" , attrs="""titleColumn""" )
lowercase_ = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(UpperCAmelCase__ , UpperCAmelCase__ )
}
def UpperCAmelCase_ ( UpperCAmelCase__ = "IMDb_Top_250_Movies.csv" ):
lowercase_ = get_imdb_top_aaa_movies()
with open(UpperCAmelCase__ , """w""" , newline="""""" ) as out_file:
lowercase_ = csv.writer(UpperCAmelCase__ )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 650
|
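# Project Euler 23: sum of all positive integers that cannot be written as the sum of two
# abundant numbers; sum_divs[n] accumulates the sum of proper divisors of n, sieve-style.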
def solution(limit=28123):
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
| 650
| 1
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
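# Fixture tables: SORTED_HANDS lists hands from weakest to strongest; the TEST_* tables
# below pair sample hands with the expected result of each PokerHand helper.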
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]),
('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 2_3),
('JH 9H TH KH QH', 2_2),
('JC KH JS JD JH', 2_1),
('KH KC 3S 3H 3D', 2_0),
('8C 9C 5C 3C TC', 1_9),
('JS QS 9H TS KH', 1_8),
('7C 7S KH 2H 7H', 1_7),
('3C KH 5D 5S KH', 1_6),
('QH 8H KD JH 8S', 1_5),
('2D 6D 9D TH 7D', 1_4),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected
@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected
@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected
@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected
@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 650
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
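# Same tester pattern as the DistilBert file above: builds a tiny random OpenLlama config
# plus inputs so the shared mixins can exercise the base model and both task heads.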
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = ids_tensor([1, 10] , config.vocab_size )
lowercase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
original_model.to(UpperCamelCase__ )
original_model.eval()
lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
lowercase_ = original_model(UpperCamelCase__ ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowercase_ = {"""type""": scaling_type, """factor""": 10.0}
lowercase_ = OpenLlamaModel(UpperCamelCase__ )
scaled_model.to(UpperCamelCase__ )
scaled_model.eval()
lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
lowercase_ = scaled_model(UpperCamelCase__ ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-5 ) )
| 650
| 1
|
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger()
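# Converts timm LeViT checkpoints to the Hugging Face format: weights are copied by
# position (both state dicts enumerate parameters in the same order), and the converted
# model's logits are checked against the original before anything is saved.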
def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
    from_model.eval()
    our_model = LevitForImageClassificationWithTeacher(config).eval()
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys())
    new_keys = list(our_model.state_dict().keys())
    print(len(og_keys), len(new_keys))
    for i in range(len(og_keys)):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights)
    x = torch.randn((2, 3, 224, 224))
    out1 = from_model(x)
    out2 = our_model(x).logits
    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 650
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
a = logging.get_logger(__name__)
DEFAULT_FONT_PATH = 'ybelkada/fonts'
def _check_torch_version():
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use '''
"""Pix2StructImageProcessor. Please upgrade torch.""" )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()
    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height, image_tensor.size(3) // patch_width, image_tensor.size(1) * patch_height * patch_width, )
    return patches.unsqueeze(0)
def render_text(text, text_size=36, text_color="black", background_color="white", left_padding=5, right_padding=5, top_padding=5, bottom_padding=5, font_bytes=None, font_path=None, ):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)
    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)
    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)
    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")
    # Convert to PIL image if necessary
    image = to_pil_image(image)
    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)
    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))
    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))
    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class PixaStructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]
    def __init__(self, do_convert_rgb: bool = True, do_normalize: bool = True, patch_size: Dict[str, int] = None, max_patches: int = 2_048, is_vqa: bool = False, **kwargs, ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs):
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()
        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)
        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)
        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)
        image = torch.nn.functional.interpolate(
            image.unsqueeze(0), size=(resized_height, resized_width), mode="bilinear", align_corners=False, antialias=True, ).squeeze(0)
        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)
        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]
        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])
        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])
        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1
        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)
        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)
        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()
        result = to_numpy_array(result)
        return result
    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        """
        Normalize an image using its own mean and (floored) standard deviation.
        """
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are flattened patches, not images.")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")
            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
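
# --- Illustrative usage (a minimal sketch added for clarity, not part of the original file) ---
# Assumes torch and numpy are available; the 224x224 input and `max_patches=64`
# are arbitrary example values chosen so the image tiles exactly into 8x8 patches.
def _demo_pix2struct_patches():
    processor = Pix2StructImageProcessor(max_patches=64)
    dummy_image = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)
    encoded = processor(images=dummy_image, return_tensors="np")
    # Each row is [row_id, col_id, flattened 16x16x3 patch]; padded rows are all zeros.
    # Expected shapes: flattened_patches (1, 64, 2 + 16 * 16 * 3), attention_mask (1, 64).
    return encoded["flattened_patches"].shape, encoded["attention_mask"].shape
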
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable loading a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
            )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset"
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset"
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset"
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
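
# Illustrative invocation (a sketch added for clarity; flag values are example
# choices, not taken from this file):
#
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de \
#     --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --learning_rate 5e-5 \
#     --num_train_epochs 2 \
#     --max_seq_length 128 \
#     --output_dir /tmp/debug_xnli
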
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : size of the neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple:
        """
        Returns the image with detected corners marked in red, plus the list of
        corner positions and their response values.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Response threshold; can be tuned for more or fewer corners.
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
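
# --- Illustrative check (a minimal sketch added for clarity, not part of the original script) ---
# The response R = det(M) - k * trace(M)^2 computed in `detect` is large where the
# gradient varies in both directions (a corner). The toy white-square image below
# is an assumed example input; detections are expected to cluster near its corners.
def _demo_harris_on_synthetic_image() -> int:
    img = np.zeros((32, 32), dtype=np.uint8)
    img[8:24, 8:24] = 255  # white square with four corners
    cv2.imwrite("synthetic.png", img)
    _, corners = HarrisCorner(0.04, 3).detect("synthetic.png")
    return len(corners)
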
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
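
# --- Illustrative usage (a minimal sketch added for clarity, not part of the test suite) ---
# Assumes transformers is installed; the one-second 440 Hz sine wave is an
# arbitrary example signal.
def _demo_whisper_features():
    fe = WhisperFeatureExtractor()  # defaults: 80 mel bins, 30 s context at 16 kHz
    t = np.linspace(0, 1, fe.sampling_rate, endpoint=False)
    wave = (0.1 * np.sin(2 * np.pi * 440 * t)).astype(np.float32)
    feats = fe(wave, sampling_rate=fe.sampling_rate, return_tensors="np").input_features
    return feats.shape  # expected (1, 80, 3000): the signal is padded to the 30 s window
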
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
    PIL_INTERPOLATION = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """
    Convert a torch image (or batch) in [-1, 1] to a list of PIL images.
    """
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """
    Convert a numpy image or a batch of images in [0, 1] to a list of PIL images.
    """
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
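
# --- Illustrative usage (a minimal sketch added for clarity, not part of the original module) ---
# Assumes torch is installed; the random 3x64x64 batch stands in for decoded
# diffusion outputs in the [-1, 1] range expected by `pt_to_pil`.
def _demo_pt_to_pil():
    import torch

    batch = torch.rand(2, 3, 64, 64) * 2 - 1  # two RGB images in [-1, 1]
    pils = pt_to_pil(batch)
    return [im.size for im in pils]  # expected [(64, 64), (64, 64)]
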
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LeViT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
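
# --- Illustrative usage (a minimal sketch added for clarity, not part of the original module) ---
# The 300x500 random uint8 image is an arbitrary example input.
def _demo_levit_preprocess():
    processor = LevitImageProcessor()
    image = (np.random.rand(300, 500, 3) * 255).astype(np.uint8)
    batch = processor(image, return_tensors="np")
    # shortest edge 300 -> resized so the shortest edge is 256 (224 * 256/224),
    # then center-cropped to 224x224.
    return batch["pixel_values"].shape  # expected (1, 3, 224, 224)
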
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)
    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5
    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
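
# --- Illustrative usage (a minimal sketch added for clarity, not part of the test suite) ---
# Mirrors the loop tested above; the random tensor stands in for a real
# denoiser's output, so the result is meaningless beyond its shape.
def _demo_unclip_scheduler_step():
    scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
    scheduler.set_timesteps(25)
    sample = torch.randn(1, 3, 32, 32)
    t = scheduler.timesteps[0]
    fake_residual = torch.randn_like(sample)  # a real pipeline would call the UNet here
    sample = scheduler.step(fake_residual, t, sample, generator=torch.manual_seed(0)).prev_sample
    return sample.shape  # expected torch.Size([1, 3, 32, 32])
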
def hex_to_bin(hex_num: str) -> int:
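    """
    Convert a hexadecimal value to its binary equivalent, returned as an int.

    The doctest examples below are added for illustration (the module already
    calls doctest.testmod()); they can be checked by hand, e.g. 0xAC = 172 =
    0b10101100.

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-fFfF")
    -1111111111111111
    """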
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6, metadata={"help": "Percentage of patches to mask."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    Generates a random boolean mask over the grid of model patches, masking
    `mask_ratio` of the coarser `mask_patch_size` blocks.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
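
# --- Illustrative check (a minimal sketch added for clarity, not part of the original script) ---
# With the defaults (192px input, 32px mask patches, 4px model patches), the mask
# covers a 48x48 grid of model patches and 22 of the 36 coarse blocks (~61%) are masked.
def _demo_mask_generator():
    gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = gen()
    return mask.shape, mask.float().mean()  # expected torch.Size([2304]), ~0.611
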
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase_ , lowercase_ , lowercase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , UpperCAmelCase__ , UpperCAmelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase_ = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase__ )
transformers.utils.logging.set_verbosity(UpperCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
lowercase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
lowercase_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase_ = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , UpperCAmelCase__ ) and data_args.train_val_split > 0.0:
lowercase_ = ds["""train"""].train_test_split(data_args.train_val_split )
lowercase_ = split["""train"""]
lowercase_ = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase_ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowercase_ = AutoConfig.from_pretrained(model_args.config_name_or_path , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowercase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
lowercase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(UpperCAmelCase__ , """decoder_type""" ):
lowercase_ = """simmim"""
# adapt config
lowercase_ = model_args.image_size if model_args.image_size is not None else config.image_size
lowercase_ = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowercase_ = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowercase_ = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
lowercase_ = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
lowercase_ = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowercase_ = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowercase_ = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
lowercase_ = AutoModelForMaskedImageModeling.from_config(UpperCAmelCase__ )
if training_args.do_train:
lowercase_ = ds["""train"""].column_names
else:
lowercase_ = ds["""validation"""].column_names
if data_args.image_column_name is not None:
lowercase_ = data_args.image_column_name
elif "image" in column_names:
lowercase_ = """image"""
elif "img" in column_names:
lowercase_ = """img"""
else:
lowercase_ = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowercase_ = Compose(
[
Lambda(lambda UpperCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowercase_ = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(UpperCAmelCase__ ):
lowercase_ = [transforms(UpperCAmelCase__ ) for image in examples[image_column_name]]
lowercase_ = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
lowercase_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(UpperCAmelCase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
lowercase_ = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(UpperCAmelCase__ )
# Initialize our trainer
lowercase_ = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
lowercase_ = None
if training_args.resume_from_checkpoint is not None:
lowercase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase_ = last_checkpoint
lowercase_ = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase_ = trainer.evaluate()
trainer.log_metrics("""eval""" , UpperCAmelCase__ )
trainer.save_metrics("""eval""" , UpperCAmelCase__ )
# Write model card and (optionally) push to hub
lowercase_ = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase__ )
else:
trainer.create_model_card(**UpperCAmelCase__ )
if __name__ == "__main__":
main()
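# A minimal sketch of how this script is typically launched (flag names come from the
# argument dataclasses above; the script filename and dataset choice are assumptions):
#
#   python run_mim.py \
#       --model_type vit \
#       --dataset_name cifar10 \
#       --output_dir ./simmim-outputs \
#       --do_train \
#       --do_eval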
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):  # 10 == length of the demo list built in main()
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
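# Worked example of the transposition phases (a minimal sketch, not part of the
# original script): for [3, 1, 2] with positions 0-2,
#   phase 0 (even): positions (0, 1) compare-exchange -> [1, 3, 2]
#   phase 1 (odd):  positions (1, 2) compare-exchange -> [1, 2, 3]
# and every remaining phase leaves the list unchanged. Each process above plays one
# position, exchanging values over pipes instead of comparing in shared memory.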
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size_dict, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
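# A worked example of the bookkeeping above (toy inputs assumed for illustration,
# num_labels=2, nothing ignored):
#   pred_label = [0, 1, 1, 1], label = [0, 1, 0, 1]
#   intersect (correctly predicted pixels) = [0, 1, 1]
#   area_intersect  -> [1, 2]
#   area_pred_label -> [1, 3]
#   area_label      -> [2, 2]
#   area_union = [1, 3] + [2, 2] - [1, 2] -> [2, 3]
# so the per-category IoU computed in mean_iou() below is [1/2, 2/3].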
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
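# Sketch of the effect of the lazy-module pattern above (usage assumed, not part of the
# file): `from transformers.models.trocr import TrOCRForCausalLM` imports this module
# cheaply; the heavy `modeling_trocr` submodule is only loaded when the attribute is
# first accessed, because _LazyModule resolves names from _import_structure on demand.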
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")
        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""

    def is_key_or_prefix_key_in_dict(key) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict."""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0

    # layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer mean
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("mean",)
    if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # batch norm layer var
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("var",)
    if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(renamed_pt_tuple_key):
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(pt_tuple_key):
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
    name = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        name = pt_tuple_key[-2] + "_g"
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        name = pt_tuple_key[-2] + "_v"
    if name is not None:
        renamed_pt_tuple_key = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
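# Illustrative sketch of the renaming above (key and shapes assumed): a PyTorch linear
# weight stored as ("encoder", "dense", "weight") with shape (out_features, in_features)
# comes back as the Flax key ("encoder", "dense", "kernel") with the tensor transposed
# to (in_features, out_features); 4-D conv kernels are instead permuted (2, 3, 1, 0).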
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model):
# convert pytorch tensor to numpy
lowercase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowercase_ = flax_model.params["""params"""]
else:
lowercase_ = flax_model.params
lowercase_ = flatten_dict(UpperCAmelCase__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase_ = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(UpperCAmelCase__ )
lowercase_ = {}
lowercase_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
lowercase_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase_ = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
lowercase_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase_ = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase_ , lowercase_ = rename_key_and_reshape_tensor(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# add model prefix if necessary
lowercase_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowercase_ = jnp.asarray(UpperCAmelCase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
continue
# also add unexpected weight so that warning is thrown
lowercase_ = jnp.asarray(UpperCAmelCase__ )
else:
# also add unexpected weight so that warning is thrown
lowercase_ = jnp.asarray(UpperCAmelCase__ )
return unflatten_dict(UpperCAmelCase__ )
def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model):
import torch
# Load the index
lowercase_ = {}
for shard_file in shard_filenames:
# load using msgpack utils
lowercase_ = torch.load(UpperCAmelCase__ )
lowercase_ = {k: v.numpy() for k, v in pt_state_dict.items()}
lowercase_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowercase_ = flax_model.params["""params"""]
lowercase_ = flatten_dict(UpperCAmelCase__ )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
lowercase_ = flax_model.params
lowercase_ = flatten_dict(UpperCAmelCase__ )
lowercase_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
lowercase_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase_ = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
lowercase_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase_ = pt_tuple_key[1:]
# Correctly rename weight parameters
lowercase_ , lowercase_ = rename_key_and_reshape_tensor(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# add model prefix if necessary
lowercase_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowercase_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowercase_ = jnp.asarray(UpperCAmelCase__ )
continue
if "var" in flax_key[-1]:
lowercase_ = jnp.asarray(UpperCAmelCase__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ )
continue
# also add unexpected weight so that warning is thrown
lowercase_ = jnp.asarray(UpperCAmelCase__ )
else:
# also add unexpected weight so that warning is thrown
lowercase_ = jnp.asarray(UpperCAmelCase__ )
return unflatten_dict(UpperCAmelCase__ )
def load_flax_checkpoint_in_pytorch_model(model, flax_checkpoint_path):
lowercase_ = os.path.abspath(UpperCAmelCase__ )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
lowercase_ = getattr(UpperCAmelCase__ , """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(UpperCAmelCase__ , """rb""" ) as state_f:
try:
lowercase_ = from_bytes(UpperCAmelCase__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(UpperCAmelCase__ , UpperCAmelCase__ )
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
lowercase_ = flatten_dict(jax.tree_util.tree_map(lambda UpperCAmelCase__ : x.dtype == jnp.bfloataa , UpperCAmelCase__ ) ).values()
if any(UpperCAmelCase__ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
lowercase_ = jax.tree_util.tree_map(
lambda UpperCAmelCase__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , UpperCAmelCase__ )
lowercase_ = flatten_dict(UpperCAmelCase__ )
lowercase_ = pt_model.state_dict()
lowercase_ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
lowercase_ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowercase_ = []
lowercase_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowercase_ = flax_key_tuple[0] == pt_model.base_model_prefix
lowercase_ = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowercase_ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowercase_ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(UpperCAmelCase__ ) not in pt_model_dict:
# conv layer
lowercase_ = flax_key_tuple[:-1] + ("""weight""",)
lowercase_ = jnp.transpose(UpperCAmelCase__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(UpperCAmelCase__ ) not in pt_model_dict:
# linear layer
lowercase_ = flax_key_tuple[:-1] + ("""weight""",)
lowercase_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowercase_ = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowercase_ = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
lowercase_ = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
lowercase_ = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowercase_ = """.""".join(UpperCAmelCase__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowercase_ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowercase_ = key.split(""".""" )
lowercase_ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowercase_ = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowercase_ = key_components[-2] + """_v"""
if name is not None:
lowercase_ = key_components[:-3] + [name]
lowercase_ = """.""".join(UpperCAmelCase__ )
lowercase_ = key
if flax_key in special_pt_names:
lowercase_ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowercase_ = np.asarray(UpperCAmelCase__ ) if not isinstance(UpperCAmelCase__ , np.ndarray ) else flax_tensor
lowercase_ = torch.from_numpy(UpperCAmelCase__ )
# remove from missing keys
missing_keys.remove(UpperCAmelCase__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(UpperCAmelCase__ )
pt_model.load_state_dict(UpperCAmelCase__ )
# re-transform missing_keys to list
lowercase_ = list(UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(UpperCAmelCase__ ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
""" use it for predictions and inference.""" )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
"""If your task is similar to the task the model of the checkpoint was trained on, """
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
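# Example of how MAPPING drives the renaming (fairseq key assumed for illustration):
# "encoder.layers.3.fc1.weight" matches the "fc1" entry above, the "*" is filled with
# layer index "3", and the value is written to
# "encoder.layers.3.feed_forward.intermediate_dense" with weight_type "weight".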
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCAmelCase__ )
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
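# A minimal sketch of what the helper above achieves (sizes assumed): for an
# nn.Embedding(1000, 16), the resulting layer reuses the (1000, 16) embedding matrix as
# its weight, tying the decoder's output projection to the input embedding table so
# hidden states of size 16 map to 1000 vocabulary logits.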
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]
    num_words = len(words)
    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }
    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
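# Example of the resulting mapping (a two-line fairseq dict file is assumed, e.g.
# "hello 52\nworld 31"):
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}
# i.e. the four reserved tokens keep fairseq's special ids and real words start at 4.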
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, dict_path, encoder_config_path, decoder_config_path, vocab_size, num_decoder_layers,
):
lowercase_ = WavaVecaConfig.from_pretrained(UpperCAmelCase__ )
lowercase_ = SpeechaTextaConfig.from_pretrained(
UpperCAmelCase__ , vocab_size=UpperCAmelCase__ , decoder_layers=UpperCAmelCase__ , do_stable_layer_norm=UpperCAmelCase__ )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True
    )
lowercase_ , lowercase_ , lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
lowercase_ = model[0].eval()
# set weights for wav2vec2 encoder
lowercase_ = WavaVecaModel(UpperCAmelCase__ )
lowercase_ = recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase__ )
lowercase_ = SpeechaTextaForCausalLM(UpperCAmelCase__ )
lowercase_ , lowercase_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase__ )
# set output linear layer
unexpected_keys.remove("""embed_out""" )
lowercase_ = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowercase_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__ , decoder=UpperCAmelCase__ )
lowercase_ = False
# add projection layer
lowercase_ = nn.Parameter(projection_layer.weight )
lowercase_ = nn.Parameter(projection_layer.bias )
lowercase_ = create_vocab_dict(UpperCAmelCase__ )
with open(os.path.join(UpperCAmelCase__ , """vocab.json""" ) , """w""" ) as fp:
json.dump(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase__ , """vocab.json""" ) )
tokenizer.save_pretrained(UpperCAmelCase__ )
lowercase_ = hf_wavavec.config.to_dict()
lowercase_ = tokenizer.pad_token_id
lowercase_ = tokenizer.bos_token_id
lowercase_ = tokenizer.eos_token_id
lowercase_ = """speech_to_text_2"""
lowercase_ = """wav2vec2"""
lowercase_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__ )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
feature_extractor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
    parser.add_argument('--vocab_size', default=10224, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"
    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
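# Example with the defaults above: sequence_state_dim=1024 and sequence_head_width=32
# give 1024 // 32 = 32 sequence heads, and 32 * 32 == 1024 satisfies the equality
# check, so construction with default values succeeds.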
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)
        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5
        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])
        inputs_dict = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int ):
'''simple docstring'''
lowercase_ = AutoformerModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
lowercase_ = model(**UpperCamelCase__ )
lowercase_ = outputs.encoder_last_hidden_state
lowercase_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ = model.get_encoder()
encoder.save_pretrained(UpperCamelCase__ )
lowercase_ = AutoformerEncoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = model.create_network_inputs(**UpperCamelCase__ )
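        # Autoformer first decomposes the context window into seasonal and trend
        # components (moving-average series decomposition) before encoding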
lowercase_ , lowercase_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
lowercase_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
lowercase_ = encoder(inputs_embeds=UpperCamelCase__ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
lowercase_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
lowercase_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
lowercase_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
lowercase_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ = model.get_decoder()
decoder.save_pretrained(UpperCamelCase__ )
lowercase_ = AutoformerDecoder.from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ )
lowercase_ = decoder(
trend=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : str = (AutoformerForPrediction,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Tuple = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = AutoformerModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
lowercase_ , lowercase_ = model_class.from_pretrained(UpperCamelCase__ , output_loading_info=UpperCamelCase__ )
self.assertEqual(info["""missing_keys"""] , [] )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*UpperCamelCase__ )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = inspect.signature(getattr(UpperCamelCase__ , """forward""" ) )
# The main input is the name of the argument after `self`
lowercase_ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(UpperCamelCase__ )] , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
lowercase_ = getattr(self.model_tester , """seq_length""" , UpperCamelCase__ )
lowercase_ = getattr(self.model_tester , """decoder_seq_length""" , UpperCamelCase__ )
lowercase_ = getattr(self.model_tester , """encoder_seq_length""" , UpperCamelCase__ )
lowercase_ = getattr(self.model_tester , """d_model""" , UpperCamelCase__ )
lowercase_ = getattr(self.model_tester , """num_attention_heads""" , UpperCamelCase__ )
lowercase_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
lowercase_ = True
lowercase_ = False
lowercase_ = True
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowercase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ = True
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowercase_ = outputs.encoder_attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
lowercase_ = len(UpperCamelCase__ )
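            # baseline length of the output tuple; optional entries checked below
            # (loss, trend, past_key_values, ...) add to it when present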
lowercase_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# decoder attentions
lowercase_ = outputs.decoder_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
lowercase_ = outputs.cross_attentions
self.assertIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
lowercase_ = True
lowercase_ = True
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 2 , len(UpperCamelCase__ ) )
lowercase_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def UpperCAmelCase_ ( UpperCAmelCase__="train-batch.pt" ):
lowercase_ = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=UpperCAmelCase__ , repo_type="""dataset""" )
lowercase_ = torch.load(UpperCAmelCase__ , map_location=UpperCAmelCase__ )
return batch
@require_torch
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCamelCase__ )
lowercase_ = prepare_batch()
with torch.no_grad():
lowercase_ = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
lowercase_ = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , UpperCamelCase__ )
lowercase_ = torch.tensor(
[[0.3_593, -1.3_398, 0.6_330], [0.2_279, 1.5_396, -0.1_792], [0.0_450, 1.3_225, -0.2_335]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCamelCase__ )
lowercase_ = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
lowercase_ = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
lowercase_ = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , UpperCamelCase__ )
lowercase_ = torch.tensor(
[[-0.0_734, -0.9_036, 0.8_358], [4.7_186, 2.4_113, 1.9_581], [1.7_953, 2.3_558, 1.2_970]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(output[0, :3, :3] , UpperCamelCase__ , atol=UpperCamelCase__ ) )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(UpperCamelCase__ )
lowercase_ = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
lowercase_ = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
lowercase_ = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , UpperCamelCase__ )
lowercase_ = torch.tensor([3_130.6_763, 4_056.5_293, 7_053.0_786] , device=UpperCamelCase__ )
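        # average over the parallel-sample dimension to obtain the point forecast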
lowercase_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , UpperCamelCase__ , rtol=1e-1 ) )
| 650
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def UpperCAmelCase_ ( subparsers=None ):
if subparsers is not None:
lowercase_ = subparsers.add_parser("""env""" )
else:
lowercase_ = argparse.ArgumentParser("""Accelerate env command""" )
parser.add_argument(
"""--config_file""" , default=UpperCAmelCase__ , help="""The config file to use for the default values in the launching script.""" )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase__ )
return parser
def UpperCAmelCase_ ( args ):
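    # gather environment information (library versions, platform, accelerator
    # availability) so it can be pasted into a GitHub issue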
lowercase_ = torch.__version__
lowercase_ = torch.cuda.is_available()
lowercase_ = is_xpu_available()
lowercase_ = is_npu_available()
lowercase_ = """Not found"""
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(UpperCAmelCase__ ):
lowercase_ = load_config_from_file(args.config_file ).to_dict()
lowercase_ = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
"""PyTorch XPU available""": str(UpperCAmelCase__ ),
"""PyTorch NPU available""": str(UpperCAmelCase__ ),
"""System RAM""": F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
}
if pt_cuda_available:
lowercase_ = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
lowercase_ = (
"""\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
else F'''\t{accelerate_config}'''
)
print(UpperCAmelCase__ )
lowercase_ = accelerate_config
return info
def UpperCAmelCase_ ( ):
lowercase_ = env_command_parser()
lowercase_ = parser.parse_args()
env_command(UpperCAmelCase__ )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 650
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
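# standard lazy-import layout: submodules are only imported when first accessed,
# except under TYPE_CHECKING, where everything is imported eagerly for type checkers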
a = {
'configuration_mobilenet_v2': [
'MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'MobileNetV2Config',
'MobileNetV2OnnxConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ['MobileNetV2FeatureExtractor']
a = ['MobileNetV2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileNetV2ForImageClassification',
'MobileNetV2ForSemanticSegmentation',
'MobileNetV2Model',
'MobileNetV2PreTrainedModel',
'load_tf_weights_in_mobilenet_v2',
]
if TYPE_CHECKING:
from .configuration_mobilenet_va import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetVaConfig,
MobileNetVaOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor
from .image_processing_mobilenet_va import MobileNetVaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_va import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetVaForImageClassification,
MobileNetVaForSemanticSegmentation,
MobileNetVaModel,
MobileNetVaPreTrainedModel,
load_tf_weights_in_mobilenet_va,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 650
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=2 , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 2
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = DeiTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = DeiTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = DeiTModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
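            # DeiTForImageClassificationWithTeacher is inference-only and computes no
            # loss, so it must not receive labels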
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ = False
lowercase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCamelCase__ ),
*get_values(UpperCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase_ = problem_type["""title"""]
lowercase_ = problem_type["""num_labels"""]
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if problem_type["num_labels"] > 1:
lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list:
lowercase_ = model(**UpperCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase_ ( ):
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
UpperCamelCase__ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCamelCase__ )
# verify the logits
lowercase_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
lowercase_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowercase_ = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ )
| 650
| 1
|
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a = logging.getLogger(__name__)
a = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
a = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={
'help': (
'The model checkpoint for weights initialization. Leave None if you want to train a model from'
' scratch.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__magic_name__ )} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class UpperCamelCase__ :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'The input training data file (a text file).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={
'help': (
'The input training data files (multiple files in glob format). '
            'Very often splitting large files into smaller files can prevent the tokenizer from going out of memory'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=__magic_name__ , metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=__magic_name__ , metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'} )
    __SCREAMING_SNAKE_CASE : bool = field(default=__magic_name__ , metadata={'help': 'Whether or not to use whole word mask.'} )
__SCREAMING_SNAKE_CASE : float = field(
default=0.1_5 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
__SCREAMING_SNAKE_CASE : float = field(
default=1 / 6 , metadata={
'help': (
'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
' modeling.'
)
} , )
__SCREAMING_SNAKE_CASE : int = field(
default=5 , metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'} )
__SCREAMING_SNAKE_CASE : int = field(
default=-1 , metadata={
'help': (
            'Optional input sequence length after tokenization. '
            'The training dataset will be truncated in blocks of this size for training. '
            'Defaults to the model max input length for single sentence inputs (taking special tokens into account).'
)
} , )
__SCREAMING_SNAKE_CASE : bool = field(
default=__magic_name__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False , UpperCAmelCase__ = None , ):
def _dataset(UpperCAmelCase__ , UpperCAmelCase__=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=UpperCAmelCase__ , file_path=UpperCAmelCase__ , block_size=args.block_size , ref_path=UpperCAmelCase__ , )
return LineByLineTextDataset(tokenizer=UpperCAmelCase__ , file_path=UpperCAmelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=UpperCAmelCase__ , file_path=UpperCAmelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCAmelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(UpperCAmelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def UpperCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase_ , lowercase_ , lowercase_ = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowercase_ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowercase_ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
lowercase_ = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
lowercase_ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowercase_ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
lowercase_ = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
lowercase_ = AutoModelWithLMHead.from_config(UpperCAmelCase__ )
model.resize_token_embeddings(len(UpperCAmelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
lowercase_ = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowercase_ = min(data_args.block_size , tokenizer.max_len )
# Get datasets
lowercase_ = (
get_dataset(UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowercase_ = (
get_dataset(UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , evaluate=UpperCAmelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
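        # XLNet is trained with permutation language modeling and needs its own collator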
lowercase_ = DataCollatorForPermutationLanguageModeling(
tokenizer=UpperCAmelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowercase_ = DataCollatorForWholeWordMask(
tokenizer=UpperCAmelCase__ , mlm_probability=data_args.mlm_probability )
else:
lowercase_ = DataCollatorForLanguageModeling(
tokenizer=UpperCAmelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowercase_ = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , train_dataset=UpperCAmelCase__ , eval_dataset=UpperCAmelCase__ , prediction_loss_only=UpperCAmelCase__ , )
# Training
if training_args.do_train:
lowercase_ = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=UpperCAmelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowercase_ = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowercase_ = trainer.evaluate()
lowercase_ = math.exp(eval_output["""eval_loss"""] )
lowercase_ = {"""perplexity""": perplexity}
lowercase_ = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , UpperCAmelCase__ , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(UpperCAmelCase__ )
return results
def UpperCAmelCase_ ( UpperCAmelCase__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 650
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 650
| 1
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
a = logging.get_logger('transformers.models.speecht5')
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
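    # weight normalization must be active so that the `weight_g`/`weight_v` parameters
    # exist to receive the checkpoint tensors; it is removed again after loading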
hf_model.apply_weight_norm()
lowercase_ = checkpoint["""input_conv.weight_g"""]
lowercase_ = checkpoint["""input_conv.weight_v"""]
lowercase_ = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
lowercase_ = checkpoint[F'''upsamples.{i}.1.weight_g''']
lowercase_ = checkpoint[F'''upsamples.{i}.1.weight_v''']
lowercase_ = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowercase_ = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
lowercase_ = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
lowercase_ = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
lowercase_ = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
lowercase_ = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
lowercase_ = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
lowercase_ = checkpoint["""output_conv.1.weight_g"""]
lowercase_ = checkpoint["""output_conv.1.weight_v"""]
lowercase_ = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , ):
if config_path is not None:
lowercase_ = SpeechTaHifiGanConfig.from_pretrained(UpperCAmelCase__ )
else:
lowercase_ = SpeechTaHifiGanConfig()
lowercase_ = SpeechTaHifiGan(UpperCAmelCase__ )
lowercase_ = torch.load(UpperCAmelCase__ )
load_weights(orig_checkpoint["""model"""]["""generator"""] , UpperCAmelCase__ , UpperCAmelCase__ )
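    # stats.npy holds the mean and scale statistics of the log-mel spectrograms,
    # used by the vocoder to de-normalize its inputs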
lowercase_ = np.load(UpperCAmelCase__ )
lowercase_ = stats[0].reshape(-1 )
lowercase_ = stats[1].reshape(-1 )
lowercase_ = torch.from_numpy(UpperCAmelCase__ ).float()
lowercase_ = torch.from_numpy(UpperCAmelCase__ ).float()
model.save_pretrained(UpperCAmelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(UpperCAmelCase__ )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
a = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 650
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = XGLMTokenizerFast
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : int = True
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = """<pad>"""
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(UpperCamelCase__ ) , 1_008 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = XGLMTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowercase_ = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(UpperCamelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
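        # XGLM ids are shifted by `fairseq_offset` relative to the raw SentencePiece ids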
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowercase_ = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase__ , f.name )
lowercase_ = XGLMTokenizer(f.name , keep_accents=UpperCamelCase__ )
lowercase_ = pickle.dumps(UpperCamelCase__ )
pickle.loads(UpperCamelCase__ )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = """I was born in 92000, and this is falsé."""
lowercase_ = tokenizer.tokenize(UpperCamelCase__ )
lowercase_ = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(UpperCamelCase__ )
lowercase_ = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """Hello World!"""
lowercase_ = [2, 31_227, 4_447, 35]
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
lowercase_ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
lowercase_ = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
        # fmt: off
        lowercase_ = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase__ , )
| 650
| 1
|
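# Project Euler 129: A(n) is the least k such that the repunit R(k) = 11...1
# (k ones) is divisible by n; A(n) exists only when gcd(n, 10) == 1. Find the
# least n for which A(n) first exceeds one million. Since A(n) <= n, the search
# can start just below the limit and step through odd candidates.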
def UpperCAmelCase_ ( UpperCAmelCase__ ):
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
lowercase_ = 1
lowercase_ = 1
while repunit:
lowercase_ = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def UpperCAmelCase_ ( UpperCAmelCase__ = 1_0_0_0_0_0_0 ):
lowercase_ = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(UpperCAmelCase__ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
| 650
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Dict = TaTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
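        # T5 relies on `extra_ids` sentinel tokens (<extra_id_0>, <extra_id_1>, ...)
        # for its span-corruption objective; create them unless the caller already did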
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase_ = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id_""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
lowercase_ = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    F''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , FutureWarning , )
        return max_model_length
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def get_sentinel_tokens( self ):
        return list(
            set(filter(lambda token : bool(re.search(R"""<extra_id_\d+>""" , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
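# Usage sketch (illustrative; it assumes network access to the Hugging Face Hub
# and mirrors the upstream T5 fast tokenizer API this class follows). T5 appends
# the </s> EOS token to every encoded sequence, and the <extra_id_*> sentinel
# tokens used for span-corruption training sit at the top of the vocabulary.
if __name__ == "__main__":
    tok = TaTokenizerFast.from_pretrained("t5-small" )
    ids = tok("The <extra_id_0> walks in <extra_id_1> park" ).input_ids
    assert ids[-1] == tok.eos_token_id  # EOS appended by build_inputs_with_special_tokens
    print(len(tok.get_sentinel_tokens() ) )  # 100 sentinel tokens by default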
| 650
| 1
|
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = IFPipeline
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
__SCREAMING_SNAKE_CASE : List[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
    def get_dummy_components( self ):
        return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_save_load_local()
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
        lowercase_ = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 )
        lowercase_ = IFSuperResolutionPipeline.from_pretrained(
            """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.float16 , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
lowercase_ , lowercase_ = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
lowercase_ = None
lowercase_ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
        lowercase_ = IFImg2ImgPipeline(**pipe_a.components )
        lowercase_ = IFImg2ImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
lowercase_ = IFInpaintingPipeline(**pipe_a.components )
lowercase_ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ):
'''simple docstring'''
_start_torch_memory_measurement()
lowercase_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (64, 64, 3)
lowercase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
lowercase_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
lowercase_ = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (256, 256, 3)
lowercase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
lowercase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
lowercase_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (64, 64, 3)
lowercase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
lowercase_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
lowercase_ = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (256, 256, 3)
lowercase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
lowercase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
lowercase_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , num_inference_steps=2 , generator=UpperCamelCase__ , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (64, 64, 3)
lowercase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
lowercase_ = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(UpperCamelCase__ )
lowercase_ = pipe_a(
prompt_embeds=UpperCamelCase__ , negative_prompt_embeds=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , original_image=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="""np""" , )
lowercase_ = output.images[0]
assert image.shape == (256, 256, 3)
lowercase_ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
lowercase_ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
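# Helper sketch (illustrative, not part of the test suite): the three calls in
# `_start_torch_memory_measurement` reset CUDA's peak-memory counters, so a
# subsequent torch.cuda.max_memory_allocated() reports the peak for a single
# pipeline stage in isolation rather than for the whole test run.
def _measure_peak_memory_bytes(fn):
    _start_torch_memory_measurement()
    fn()
    return torch.cuda.max_memory_allocated()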
| 650
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=True , )
        scheduler = DDIMScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_one=False , )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=False , set_alpha_to_zero=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        mask = floats_tensor((1, 16, 16) , rng=random.Random(seed ) ).to(device )
        latents = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """a dog and a newt""",
            """mask_image""": mask,
            """image_latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 2,
            """inpaint_strength""": 1.0,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def get_dummy_mask_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": image,
            """source_prompt""": """a cat and a frog""",
            """target_prompt""": """a dog and a newt""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """num_maps_per_mask""": 2,
            """mask_encode_strength""": 1.0,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs
    def get_dummy_inversion_inputs( self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image""": image,
            """prompt""": """a cat and a frog""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """inpaint_strength""": 1.0,
            """guidance_scale""": 6.0,
            """decode_latents""": True,
            """output_type""": """numpy""",
        }
        return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @classmethod
    def setUpClass( cls ):
        raw_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
        raw_image = raw_image.convert("""RGB""" ).resize((768, 768) )
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full( self ):
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = """a bowl of fruit"""
        target_prompt = """a bowl of pears"""
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
        expected_image = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
    def test_stable_diffusion_diffedit_dpm( self ):
        generator = torch.manual_seed(0 )
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-1""" , safety_checker=None , torch_dtype=torch.float16 )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None )
        source_prompt = """a bowl of fruit"""
        target_prompt = """a bowl of pears"""
        mask_image = pipe.generate_mask(
            image=self.raw_image , source_prompt=source_prompt , target_prompt=target_prompt , generator=generator , )
        inv_latents = pipe.invert(
            prompt=source_prompt , image=self.raw_image , inpaint_strength=0.7 , generator=generator , num_inference_steps=25 , ).latents
        image = pipe(
            prompt=target_prompt , mask_image=mask_image , image_latents=inv_latents , generator=generator , negative_prompt=source_prompt , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
        expected_image = (
            np.array(
                load_image(
                    """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
                    """/diffedit/pears.png""" ).resize((768, 768) ) )
            / 255
        )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 650
| 1
|
def solution(n: int = 1_000) -> int:
    # Project Euler problem 1: sum every natural number below ``n`` that is a
    # multiple of 3 or 5. The single ``or`` test already counts multiples of
    # 15 exactly once, so no inclusion-exclusion correction is needed.
    result = 0
    for a in range(n):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result
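# Equivalent closed form (illustrative): sum the arithmetic series of the
# multiples of 3 and of 5 below n, then subtract the series of multiples of
# 15 once to undo the double count.
def solution_closed_form(n: int = 1_000) -> int:
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of multiples of k strictly below n
        return k * m * (m + 1) // 2
    return series_sum(3) + series_sum(5) - series_sum(15)
assert solution_closed_form(10) == 23  # 3 + 5 + 6 + 9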
if __name__ == "__main__":
print(F'''{solution() = }''')
| 650
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class SwinaSRImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ):
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=data_format )
    def preprocess( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
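# Worked example of the padding rule above (illustrative): with pad_size=8 a
# height of 17 gains (17 // 8 + 1) * 8 - 17 = 7 rows of symmetric padding,
# reaching 24. Note that a dimension which is already a multiple of the pad
# size still gains one full block, e.g. 16 -> 24.
def _pad_amount(old: int, size: int) -> int:
    return (old // size + 1) * size - old
assert _pad_amount(17, 8) == 7
assert _pad_amount(16, 8) == 8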
| 650
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_openai_gpt_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTLMHeadModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_double_lm_head_model( self , config , input_ids , head_mask , token_type_ids , *args ):
        model = OpenAIGPTDoubleHeadsModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_openai_gpt_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : List[Any] = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
__SCREAMING_SNAKE_CASE : str = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=torch_device , )
                inputs_dict["""input_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""token_type_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""mc_token_ids"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=torch_device , )
                inputs_dict["""mc_labels"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*UpperCamelCase__ )
@slow
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = OpenAIGPTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
    @slow
    def test_lm_generate_openai_gpt( self ):
        model = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
        model.to(torch_device )
        input_ids = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 650
|
def get_highest_set_bit_position(number: int) -> int:
    """
    >>> get_highest_set_bit_position(5)
    3
    >>> get_highest_set_bit_position(0)
    0
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
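# Worked example (illustrative): 5 is 0b101, so the loop shifts three times
# before the value reaches zero, and the reported position equals the bit
# length of the input.
assert get_highest_set_bit_position(0b101) == 3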
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 1
|
def power(base: int, exponent: int) -> float:
    # base ** exponent by recursion: the anchor case exponent == 0 returns 1.
    return base * power(base, exponent - 1) if exponent else 1
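# Iterative equivalent (illustrative): produces the same result without
# recursion, so it is not limited by Python's recursion depth for large
# exponents.
def power_iterative(base: int, exponent: int) -> float:
    result = 1
    for _ in range(exponent):
        result *= base
    return result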
if __name__ == "__main__":
print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(F'''{base} to the power of {exponent} is {result}''')
| 650
|
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    @register_to_config
    def __init__( self , max_length : int , vocab_size : int , d_model : int , dropout_rate : float , num_layers : int , num_heads : int , d_kv : int , d_ff : int , feed_forward_proj : str , is_decoder : bool = False , ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size , d_model )
        self.position_encoding = nn.Embedding(max_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate )
        t5config = TaConfig(
            vocab_size=vocab_size , d_model=d_model , num_heads=num_heads , d_kv=d_kv , d_ff=d_ff , dropout_rate=dropout_rate , feed_forward_proj=feed_forward_proj , is_decoder=is_decoder , is_encoder_decoder=False , )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            lyr = TaBlock(t5config )
            self.encoders.append(lyr )
        self.layer_norm = TaLayerNorm(d_model )
        self.dropout_post = nn.Dropout(p=dropout_rate )
    def forward( self , encoder_input_tokens , encoder_inputs_mask ):
        x = self.token_embedder(encoder_input_tokens )
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length , device=encoder_input_tokens.device )
        x += self.position_encoding(inputs_positions )
        x = self.dropout_pre(x )
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask , input_shape )
        for lyr in self.encoders:
            x = lyr(x , extended_attention_mask )[0]
        x = self.layer_norm(x )
        return self.dropout_post(x ), encoder_inputs_mask
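# Minimal sketch of the additive position-embedding lookup used in `forward`
# above (illustrative): token embeddings of shape (batch, seq_len, d_model)
# receive an embedding of the positions 0..seq_len-1, which broadcasts over
# the batch dimension.
if __name__ == "__main__":
    token_emb = nn.Embedding(128 , 8 )(torch.randint(0 , 128 , (2, 4) ) )  # (2, 4, 8)
    pos_emb = nn.Embedding(16 , 8 )(torch.arange(4 ) )  # (4, 8)
    print((token_emb + pos_emb).shape )  # torch.Size([2, 4, 8])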
| 650
| 1
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class UpperCamelCase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
lowercase_ = (
os.path.join(UpperCamelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowercase_ = Extractor
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str ):
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
lowercase_ = os.path.abspath(UpperCamelCase__ )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase__ ) )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : bool ):
'''simple docstring'''
return force_extract or (
not os.path.isfile(UpperCamelCase__ ) and not (os.path.isdir(UpperCamelCase__ ) and os.listdir(UpperCamelCase__ ))
)
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : bool = False ):
'''simple docstring'''
lowercase_ = self.extractor.infer_extractor_format(UpperCamelCase__ )
if not extractor_format:
return input_path
lowercase_ = self._get_output_path(UpperCamelCase__ )
if self._do_extract(UpperCamelCase__ , UpperCamelCase__ ):
self.extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return output_path
class UpperCamelCase__ ( __magic_name__ ):
@classmethod
@abstractmethod
def UpperCAmelCase__ ( cls : Optional[Any] , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : int ):
'''simple docstring'''
...
@staticmethod
@abstractmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
...
class UpperCamelCase__ ( __magic_name__ , __magic_name__ ):
__SCREAMING_SNAKE_CASE : List[bytes] = []
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ):
'''simple docstring'''
with open(UpperCamelCase__ , """rb""" ) as f:
return f.read(UpperCamelCase__ )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ):
'''simple docstring'''
if not magic_number:
lowercase_ = max(len(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers )
try:
lowercase_ = cls.read_magic_number(UpperCamelCase__ , UpperCamelCase__ )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers )
class UpperCamelCase__ ( __magic_name__ ):
@classmethod
def UpperCAmelCase__ ( cls : int , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : int ):
'''simple docstring'''
return tarfile.is_tarfile(UpperCamelCase__ )
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ):
'''simple docstring'''
def resolved(UpperCamelCase__ : str ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase__ ) )
def badpath(UpperCamelCase__ : str , UpperCamelCase__ : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ).startswith(UpperCamelCase__ )
def badlink(UpperCamelCase__ : List[str] , UpperCamelCase__ : str ) -> bool:
# Links are interpreted relative to the directory containing the link
lowercase_ = resolved(os.path.join(UpperCamelCase__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase__ )
lowercase_ = resolved(UpperCamelCase__ )
for finfo in members:
if badpath(finfo.name , UpperCamelCase__ ):
logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' )
elif finfo.issym() and badlink(UpperCamelCase__ , UpperCamelCase__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' )
elif finfo.islnk() and badlink(UpperCamelCase__ , UpperCamelCase__ ):
logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' )
else:
yield finfo
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
lowercase_ = tarfile.open(UpperCamelCase__ )
tar_file.extractall(UpperCamelCase__ , members=TarExtractor.safemembers(UpperCamelCase__ , UpperCamelCase__ ) )
tar_file.close()
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Any = [B'\x1F\x8B']
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
with gzip.open(UpperCamelCase__ , """rb""" ) as gzip_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = [
B'PK\x03\x04',
B'PK\x05\x06', # empty archive
B'PK\x07\x08', # spanned archive
]
@classmethod
def UpperCAmelCase__ ( cls : int , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ):
'''simple docstring'''
if super().is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase__ , """rb""" ) as fp:
lowercase_ = _EndRecData(UpperCamelCase__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowercase_ = fp.read(UpperCamelCase__ ) # CD is where we expect it to be
if len(UpperCamelCase__ ) == sizeCentralDir:
lowercase_ = struct.unpack(UpperCamelCase__ , UpperCamelCase__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with zipfile.ZipFile(UpperCamelCase__ , """r""" ) as zip_file:
zip_file.extractall(UpperCamelCase__ )
zip_file.close()
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = [B'\xFD\x37\x7A\x58\x5A\x00']
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
with lzma.open(UpperCamelCase__ ) as compressed_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("""Please pip install rarfile""" )
import rarfile
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
lowercase_ = rarfile.RarFile(UpperCamelCase__ )
rf.extractall(UpperCamelCase__ )
rf.close()
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : List[str] = [B'\x28\xb5\x2F\xFD']
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("""Please pip install zstandard""" )
import zstandard as zstd
lowercase_ = zstd.ZstdDecompressor()
with open(UpperCamelCase__ , """rb""" ) as ifh, open(UpperCamelCase__ , """wb""" ) as ofh:
dctx.copy_stream(UpperCamelCase__ , UpperCamelCase__ )
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : List[str] = [B'\x42\x5A\x68']
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
        with bz2.open(UpperCamelCase__ , """rb""" ) as compressed_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : int = [B'\x37\x7A\xBC\xAF\x27\x1C']
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("""Please pip install py7zr""" )
        import py7zr
        os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
        with py7zr.SevenZipFile(UpperCamelCase__ , """r""" ) as archive:
archive.extractall(UpperCamelCase__ )
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Any = [B'\x04\x22\x4D\x18']
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ):
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(UpperCamelCase__ , """rb""" ) as compressed_file:
with open(UpperCamelCase__ , """wb""" ) as extracted_file:
shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ )
class UpperCamelCase__ :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
__SCREAMING_SNAKE_CASE : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def UpperCAmelCase__ ( cls : Tuple ):
'''simple docstring'''
return max(
len(UpperCamelCase__ )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase__ , UpperCamelCase__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ):
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase__ , magic_number_length=UpperCamelCase__ )
except OSError:
return b""
@classmethod
def UpperCAmelCase__ ( cls : str , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bool = False ):
'''simple docstring'''
warnings.warn(
"""Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'infer_extractor_format' instead.""" , category=UpperCamelCase__ , )
lowercase_ = cls.infer_extractor_format(UpperCamelCase__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def UpperCAmelCase__ ( cls : int , UpperCamelCase__ : Union[Path, str] ): # <Added version="2.4.0"/>
'''simple docstring'''
lowercase_ = cls._get_magic_number_max_length()
lowercase_ = cls._read_magic_number(UpperCamelCase__ , UpperCamelCase__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ):
return extractor_format
@classmethod
def UpperCAmelCase__ ( cls : int , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[BaseExtractor] = "deprecated" , ):
'''simple docstring'''
os.makedirs(os.path.dirname(UpperCamelCase__ ) , exist_ok=UpperCamelCase__ )
# Prevent parallel extractions
lowercase_ = str(Path(UpperCamelCase__ ).with_suffix(""".lock""" ) )
with FileLock(UpperCamelCase__ ):
shutil.rmtree(UpperCamelCase__ , ignore_errors=UpperCamelCase__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): # passed as positional arg
warnings.warn(
"""Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
"""Use 'extractor_format' instead.""" , category=UpperCamelCase__ , )
lowercase_ = extractor if extractor != """deprecated""" else extractor_format
else:
lowercase_ = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
else:
warnings.warn(
"""Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
"""exception in 3.0.0.""" , category=UpperCamelCase__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase__ ):
return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
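# Usage sketch (illustrative; the import path below assumes the upstream
# `datasets` package this module comes from, and the archive/output names are
# hypothetical):
#
#   from datasets.utils.extract import Extractor
#   fmt = Extractor.infer_extractor_format("archive.tar.gz")
#   if fmt is not None:
#       Extractor.extract("archive.tar.gz", "out_dir", extractor_format=fmt)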
| 650
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a = TypeVar('T')
class LRUCache( Generic[T] ):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n : int ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("""n should be an integer greater than 0.""" )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x : T ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                # Evict the least recently used key from the tail of the deque.
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ):
        for k in self.dq_store:
            print(k )
    def __repr__( self ):
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 650
| 1
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = R'\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n            or scores for each vocabulary token after SoftMax.\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional stopping criteria specific kwargs.\n\n    Return:\n        `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class UpperCamelCase__ ( __magic_name__ ):
@add_start_docstrings(UpperCamelCase__ )
def __call__( self : Dict , UpperCamelCase__ : torch.LongTensor , UpperCamelCase__ : torch.FloatTensor , **UpperCamelCase__ : int ):
'''simple docstring'''
raise NotImplementedError("""StoppingCriteria needs to be subclassed""" )
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None ):
'''simple docstring'''
lowercase_ = max_length
lowercase_ = max_position_embeddings
@add_start_docstrings(UpperCamelCase__ )
def __call__( self : Union[str, Any] , UpperCamelCase__ : torch.LongTensor , UpperCamelCase__ : torch.FloatTensor , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
lowercase_ = input_ids.shape[-1]
lowercase_ = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
"""This is a friendly reminder - the current text generation call will exceed the model's predefined """
F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
"""exceptions, performance degradation, or nothing at all.""" )
return is_done
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
'''simple docstring'''
warnings.warn(
"""The class `MaxNewTokensCriteria` is deprecated. """
F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
"""with `max_length = start_length + max_new_tokens` instead.""" , UpperCamelCase__ , )
lowercase_ = start_length
lowercase_ = max_new_tokens
lowercase_ = start_length + max_new_tokens
@add_start_docstrings(UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : torch.LongTensor , UpperCamelCase__ : torch.FloatTensor , **UpperCamelCase__ : int ):
'''simple docstring'''
return input_ids.shape[-1] >= self.max_length
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : Tuple , UpperCamelCase__ : float , UpperCamelCase__ : Optional[float] = None ):
'''simple docstring'''
lowercase_ = max_time
lowercase_ = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(UpperCamelCase__ )
def __call__( self : List[str] , UpperCamelCase__ : torch.LongTensor , UpperCamelCase__ : torch.FloatTensor , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
return time.time() - self.initial_timestamp > self.max_time
class UpperCamelCase__ ( __magic_name__ ):
@add_start_docstrings(UpperCamelCase__ )
def __call__( self : List[Any] , UpperCamelCase__ : torch.LongTensor , UpperCamelCase__ : torch.FloatTensor , **UpperCamelCase__ : int ):
'''simple docstring'''
return any(criteria(UpperCamelCase__ , UpperCamelCase__ ) for criteria in self )
@property
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for stopping_criterium in self:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return stopping_criterium.max_length
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
return stopping_criterium.max_length
return None
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = stopping_criteria.max_length
lowercase_ = deepcopy(UpperCAmelCase__ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn("""You set different `max_length` for stopping criteria and `max_length` parameter""" , UpperCAmelCase__ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=UpperCAmelCase__ ) )
return new_stopping_criteria
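# Usage sketch: criteria compose through StoppingCriteriaList, and generation
# stops as soon as any single criterion returns True.
_example_criteria = StoppingCriteriaList(
    [
        MaxLengthCriteria(max_length=64),
        MaxTimeCriteria(max_time=5.0),  # wall-clock budget in seconds
    ]
)
# inside a decoding loop one would check, e.g.:
#     if _example_criteria(input_ids, scores):
#         break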
| 650
|
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ = preprocessor.num_special_tokens_to_add(UpperCamelCase__ )
lowercase_ = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
lowercase_ = [""" """.join(["""a"""] ) * seq_length] * batch_size
lowercase_ = dict(preprocessor(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
lowercase_ = inputs.pop("""input_ids""" )
return inputs
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ = compute_effective_axis_dimension(UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase_ = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase_ = dict(preprocessor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
lowercase_ = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 650
|
def solution(limit=28123):
    """Sum of all positive integers that cannot be written as the sum of two
    abundant numbers (Project Euler problem 23)."""
    # Sieve of proper-divisor sums: each pair (i, k) with i * k <= limit
    # contributes i + k to the divisor sum of i * k.
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
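# Illustrative check of the divisor-sum sieve above (not part of the solution):
# proper divisors of 12 are 1, 2, 3, 4, 6, summing to 16 > 12, so 12 is the
# smallest abundant number, while 28 is perfect (1 + 2 + 4 + 7 + 14 = 28).
def _proper_divisor_sum(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)


assert _proper_divisor_sum(12) == 16
assert _proper_divisor_sum(28) == 28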
| 650
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
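# Typical launches (assuming this file is saved as local_sgd.py):
#     python local_sgd.py --cpu
#     accelerate launch local_sgd.py --gradient_accumulation_steps 2 --local_sgd_steps 8
# Local SGD only synchronizes model parameters every `local_sgd_steps` optimizer
# steps, trading a little gradient staleness for less communication.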
| 650
|
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
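    # A sketch of what "linear" RoPE scaling does to the rotary angles (an
    # illustration, not the model implementation): positions are divided by the
    # scaling factor before the frequencies are applied, stretching the period.
    #
    #     inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    #     t = positions.float() / factor        # factor > 1 stretches positions
    #     angles = torch.outer(t, inv_freq)     # unchanged when factor == 1.0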
| 650
| 1
|
def solution(limit=28123):
    """Sum of all positive integers that cannot be written as the sum of two
    abundant numbers (Project Euler problem 23)."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
| 650
|
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
a = logging.get_logger(__name__)
DEFAULT_FONT_PATH = "ybelkada/fonts"


def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    """Extract non-overlapping patches; returns shape (1, rows, columns, patch_height * patch_width * channels)."""
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
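# Illustrative shapes for torch_extract_patches (a sketch; assumes a 3-channel,
# channels-first image):
#     >>> example = torch.randn(3, 64, 48)
#     >>> torch_extract_patches(example, 16, 16).shape
#     torch.Size([1, 4, 3, 768])   # (1, rows, columns, 16 * 16 * 3)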
def render_text(
    text: str,
    text_size: int = 36,
    text_color: str = "black",
    background_color: str = "white",
    left_padding: int = 5,
    right_padding: int = 5,
    top_padding: int = 5,
    bottom_padding: int = 5,
    font_bytes: Optional[bytes] = None,
    font_path: Optional[str] = None,
) -> Image.Image:
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header: str, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)

    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)

    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t. the resized image fits within max_patches patches
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)

        return result
    def normalize(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean and std across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are always flattened patches.")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
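# Usage sketch (constructor arguments are illustrative):
#     >>> processor = Pix2StructImageProcessor(max_patches=1024)
#     >>> batch = processor(images=[pil_image], return_tensors="pt")
#     >>> batch["flattened_patches"].shape
#     torch.Size([1, 1024, 770])   # 770 = 2 location ids + 16 * 16 * 3 pixels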
| 650
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 650
|
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: empirically determined sensitivity constant, expected in {0.04, 0.06}
        window_size: size of the neighbourhood considered for each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Detect Harris corners; returns the annotated color image and the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04  # note: local constant shadows self.k, as in the original
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
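# The response computed above is the standard Harris measure: with the windowed
# second-moment matrix M = [[wxx, wxy], [wxy, wyy]],
#     r = det(M) - k * trace(M)**2 = (wxx * wyy - wxy**2) - k * (wxx + wyy)**2
# Large positive r marks a corner, large negative r an edge, and |r| near zero
# a flat region.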
| 650
| 1
|
def kth_permutation(k, n):
    """
    Return the k-th (0-indexed) lexicographic permutation of range(n), using
    the factorial number system.

    >>> kth_permutation(0, 5)
    [0, 1, 2, 3, 4]
    """
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation digit by digit in the factorial base
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
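# Worked example for k = 3, n = 3 (factorials = [1, 2], elements = [0, 1, 2]):
#     pop 2 -> divmod(3, 2) = (1, 1): take elements[1] = 1
#     pop 1 -> divmod(1, 1) = (1, 0): take elements[1] = 2
#     append the remaining 0 -> [1, 2, 0], the k = 3 (0-indexed) permutation.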
| 650
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
a = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
a = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
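# Usage sketch: a torch batch in [-1, 1] becomes a list of PIL images.
#     >>> import torch
#     >>> batch = torch.rand(2, 3, 64, 64) * 2 - 1   # (N, C, H, W)
#     >>> pils = pt_to_pil(batch)
#     >>> len(pils), pils[0].size
#     (2, (64, 64))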
| 650
| 1
|
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Ratio v/c of a speed to the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x-axis for the four-vector (ct, x, y, z)."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the boost; a symbolic four-vector is transformed when event is None."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
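# Worked example: at half the speed of light, beta = 0.5 and
# gamma = 1 / sqrt(1 - 0.25) ~= 1.1547, so the boost matrix has ~1.1547 on the
# ct/x diagonal entries and ~-0.5774 (= -gamma * beta) on the mixing terms.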
| 650
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 650
| 1
|
from math import isqrt
def is_prime(number: int) -> bool:
    """Return True if number is prime, checking divisors up to its square root."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are a difference of two consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
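# Why the `6 * cube_index` step works: consecutive cubes differ by
#     (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1,
# and the gap between successive differences is 6*(n + 1), so the candidates
# 7, 19, 37, 61, ... are generated incrementally without recomputing cubes.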
| 650
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={"help": "Stride to use for the encoder."}
    )
class MaskGenerator:
    """Generates the boolean mask used by SimMIM: a random subset of patches is masked out."""

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError("Input size must be divisible by mask patch size")
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError("Mask patch size must be divisible by model patch size")

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
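# Quick demonstration (added sketch, not part of the original script): with the
# defaults above, a 192x192 input, 32-pixel mask patches and a 4-pixel model patch
# give a 6x6 grid of mask patches, upsampled to a 48x48 (= 2304-entry) boolean mask
# in which roughly 60% of patches are set.
#
#   generator = MaskGenerator()
#   mask = generator()
#   assert mask.shape == (48 * 48,)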
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    mask = torch.stack([example["mask"] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mim", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, "decoder_type"):
        config.decoder_type = "simmim"

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            "image_size": model_args.image_size,
            "patch_size": model_args.patch_size,
            "encoder_stride": model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        # Apply the transforms and attach a fresh random mask to every image.
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        examples["mask"] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "masked-image-modeling",
        "dataset": data_args.dataset_name,
        "tags": ["masked-image-modeling"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
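# Example invocation (added note; the flags are the standard script/`TrainingArguments`
# flags defined above, the output path is a placeholder):
#   python run_mim.py --model_type swin --dataset_name cifar10 \
#       --output_dir ./simmim-swin --do_train --do_eval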
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}
# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
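# Worked examples (added; they assume the reconstructed name `length_conversion`):
#   length_conversion(4, "kilometer", "meter")  -> 4000.0   (exponent 3 - 0 = 3)
#   length_conversion(1, "meter", "km")         -> 0.001    (exponent -(3 - 0) = -3)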
if __name__ == "__main__":
from doctest import testmod
testmod()
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}"
            )
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
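# Example usage (added sketch; assumes Pillow is installed and the reconstructed
# class name above). With the defaults, the shortest edge is resized to
# int(256 / 224 * 224) = 256 and then a 224x224 center crop is taken:
#   from PIL import Image
#   processor = LevitImageProcessor()
#   batch = processor(Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)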
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
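# Example usage (added sketch; the first call downloads ~11 MB of MNIST files to
# the given directory, which is a placeholder path):
#   datasets = read_data_sets("/tmp/mnist", one_hot=True)
#   images, labels = datasets.train.next_batch(32)
#   images.shape, labels.shape  # -> (32, 784), (32, 10)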
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
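# Note (added): with the lazy module installed above, an import such as
# `from transformers.models.trocr import TrOCRProcessor` only loads the
# corresponding submodule on first attribute access, keeping import time low.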
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)

        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
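# Example (added sketch): building a hybrid DPT config picks up the default BiT
# backbone defined in `__init__` above.
#   config = DPTConfig(is_hybrid=True)
#   config.backbone_config.layer_type  # -> "bottleneck"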
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
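# Illustrative note (added): for a fairseq dictionary file whose first two lines are
# "hello 42" and "world 17", create_vocab_dict returns
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}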
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-large-lv60',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/s2t-small-mustc-en-fr-st',
type=str,
help='Path to hf decoder s2t checkpoint config',
)
parser.add_argument('--vocab_size', default=1_0_2_2_4, type=int, help='Vocab size of decoder')
parser.add_argument('--num_decoder_layers', default=7, type=int, help='Number of decoder layers')
    args = parser.parse_args()
    convert_wav2vec2_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
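# Example invocation (added sketch; the script filename and paths are placeholders):
#   python convert_wav2vec2_seq2seq.py --checkpoint_path ./checkpoint_best.pt \
#       --dict_path ./dict.ltr.txt --pytorch_dump_folder_path ./wav2vec2-2-speech2text2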
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                f" {self.sequence_state_dim} and {self.sequence_state_dim}."
            )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_state_dim}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
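# Example (added sketch): instantiating a folding-model config picks up the nested
# defaults defined above.
#   config = EsmConfig(vocab_size=33, is_folding_model=True)
#   config.esmfold_config.trunk.structure_module.sequence_dim  # -> 384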
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow C++ logging (reconstructed from the otherwise unused `os` import)
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
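# `accelerate env`: gathers platform, framework, and Accelerate-config details in a
# copy-pasteable form for bug reports.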
def env_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser("""env""" )
    else:
        parser = argparse.ArgumentParser("""Accelerate env command""" )
    parser.add_argument(
        """--config_file""" , default=None , help="""The config file to use for the default values in the launching script.""" )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args ):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = """Not found"""
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        """`Accelerate` version""": version,
        """Platform""": platform.platform(),
        """Python version""": platform.python_version(),
        """Numpy version""": np.__version__,
        """PyTorch version (GPU?)""": F'''{pt_version} ({pt_cuda_available})''',
        """PyTorch XPU available""": str(pt_xpu_available ),
        """PyTorch NPU available""": str(pt_npu_available ),
        """System RAM""": F'''{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info["""GPU type"""] = torch.cuda.get_device_name()
    print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
    print("""\n""".join([F'''- {prop}: {val}''' for prop, val in info.items()] ) )
    print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
    accelerate_config_str = (
        """\n""".join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str )
    info["""`Accelerate` configs"""] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 650
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 5_1_2,
't5-base': 5_1_2,
't5-large': 5_1_2,
't5-3b': 5_1_2,
't5-11b': 5_1_2,
}
class TaTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = TaTokenizer
    prefix_tokens: List[int] = []
    def __init__( self : int , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        '''simple docstring'''
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda token: bool("""extra_id_""" in str(token ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    """ provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
                    """ tokens""" )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        '''simple docstring'''
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    """This tokenizer was incorrectly instantiated with a model max length of"""
                    F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
                    """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
                    F''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
                    """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , FutureWarning , )
        return max_model_length
    def save_vocabulary( self : int , save_directory : str , filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1
    def create_token_type_ids_from_sequences( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
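    # Resulting layout (T5 uses no prefix tokens, so `prefix_tokens` is empty):
    #   single sequence:    X </s>
    #   pair of sequences:  A </s> B </s>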
    def get_sentinel_tokens( self : Dict ):
        '''simple docstring'''
        return list(
            set(filter(lambda token: re.search(R"""<extra_id_\d+>""" , token ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self : str ):
        '''simple docstring'''
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
| 650
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
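# The test class below runs the shared ModelTesterMixin/PipelineTesterMixin checks over
# every DeiT variant, plus a few DeiT-specific training and problem-type tests.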
@require_torch
class DeiTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DeiTModel,
            'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""DeiT does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
        '''simple docstring'''
        pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def test_training( self ):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_problem_types( self ):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
                    config.problem_type = problem_type["""title"""]
                    config.num_labels = problem_type["""num_labels"""]
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs["""labels"""] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
                    inputs["""labels"""] = inputs["""labels"""].to(problem_type["""dtype"""] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
                    loss.backward()
    @slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        '''simple docstring'''
        return (
            DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16( self ):
        '''simple docstring'''
        model = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.float16 , device_map="""auto""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 650
| 1
|
import numpy as np
def UpperCAmelCase_ ( vector: np.ndarray , alpha: float ):
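    """
    Exponential Linear Unit: keeps positive inputs unchanged and maps
    non-positive inputs to alpha * (exp(x) - 1). The doctests below give
    `doctest.testmod()` in the main guard something to verify.

    >>> UpperCAmelCase_(np.array([1.0, 2.0]), 1.0).tolist()
    [1.0, 2.0]
    >>> bool(UpperCAmelCase_(np.array([-1.0]), 1.0)[0] == np.exp(-1.0) - 1.0)
    True
    """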
    return np.where(vector > 0 , vector , alpha * (np.exp(vector ) - 1) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
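# Guarded imports: expose the real ControlNet pipelines only when both transformers and
# torch are installed; otherwise the dummy placeholder objects are pulled in instead.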
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 650
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
a = [
'openmmlab/upernet-convnext-tiny',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
_CONFIG_FOR_DOC = 'UperNetConfig'
class UperNetConvModule( nn.Module ):
    def __init__( self : int , in_channels : int , out_channels : int , kernel_size : Union[int, Tuple[int, int]] , padding : Union[int, Tuple[int, int], str] = 0 , bias : bool = False , dilation : Union[int, Tuple[int, int]] = 1 , ):
        '''simple docstring'''
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels , out_channels=out_channels , kernel_size=kernel_size , padding=padding , bias=bias , dilation=dilation , )
        self.batch_norm = nn.BatchNorm2d(out_channels )
        self.activation = nn.ReLU()
    def forward( self : int , input : torch.Tensor ):
        '''simple docstring'''
        output = self.conv(input )
        output = self.batch_norm(output )
        output = self.activation(output )
        return output
class UperNetPyramidPoolingBlock( nn.Module ):
    def __init__( self : Dict , pool_scale : int , in_channels : int , channels : int ):
        '''simple docstring'''
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale ),
            UperNetConvModule(in_channels , channels , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(i ) , layer )
    def forward( self : str , input : torch.Tensor ):
        '''simple docstring'''
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
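# Pyramid Pooling Module (as in PSPNet): pools the coarsest feature map at several scales,
# projects each pooled map with a 1x1 conv block, and upsamples everything back to the
# input resolution.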
class UperNetPyramidPoolingModule( nn.Module ):
    def __init__( self : str , pool_scales : Tuple[int, ...] , in_channels : int , channels : int , align_corners : bool ):
        '''simple docstring'''
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales ):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale , in_channels=in_channels , channels=channels )
            self.blocks.append(block )
            self.add_module(str(i ) , block )
    def forward( self : Tuple , x : torch.Tensor ):
        '''simple docstring'''
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x )
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners )
            ppm_outs.append(upsampled_ppm_out )
        return ppm_outs
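# UPerNet decode head: fuses the PSP output with an FPN built over the backbone's feature
# pyramid, then predicts per-pixel class logits with a 1x1 classifier.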
class UperNetHead( nn.Module ):
    def __init__( self : List[str] , config , in_channels ):
        '''simple docstring'''
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels , self.channels , kernel_size=1 )
            fpn_conv = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(l_conv )
            self.fpn_convs.append(fpn_conv )
        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
    def init_weights( self : List[str] ):
        '''simple docstring'''
        self.apply(self._init_weights )
    def _init_weights( self : Optional[int] , module ):
        '''simple docstring'''
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def psp_forward( self : Union[str, Any] , inputs ):
        '''simple docstring'''
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x ) )
        psp_outs = torch.cat(psp_outs , dim=1 )
        output = self.bottleneck(psp_outs )
        return output
    def forward( self : Dict , encoder_hidden_states : torch.Tensor ):
        '''simple docstring'''
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(encoder_hidden_states ) )
        # build top-down path
        used_backbone_levels = len(laterals )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=prev_shape , mode="""bilinear""" , align_corners=self.align_corners )
        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners )
        fpn_outs = torch.cat(fpn_outs , dim=1 )
        output = self.fpn_bottleneck(fpn_outs )
        output = self.classifier(output )
        return output
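# Auxiliary FCN head applied to an intermediate backbone stage; during training its loss is
# added to the main loss with weight `config.auxiliary_loss_weight`.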
class UperNetFCNHead( nn.Module ):
    def __init__( self : Optional[int] , config , in_index : int = 2 , kernel_size : int = 3 , dilation : Union[int, Tuple[int, int]] = 1 ):
        '''simple docstring'''
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index
        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=kernel_size , padding=conv_padding , dilation=dilation ) )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs )
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=kernel_size , padding=kernel_size // 2 )
        self.classifier = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
    def init_weights( self : Union[str, Any] ):
        '''simple docstring'''
        self.apply(self._init_weights )
    def _init_weights( self : Tuple , module ):
        '''simple docstring'''
        if isinstance(module , nn.Conv2d ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
    def forward( self : str , encoder_hidden_states : torch.Tensor ):
        '''simple docstring'''
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states )
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        output = self.classifier(output )
        return output
class UperNetPreTrainedModel( PreTrainedModel ):
    config_class = UperNetConfig
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self : List[str] , module ):
        '''simple docstring'''
        if isinstance(module , UperNetPreTrainedModel ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()
    def init_weights( self : Any ):
        '''simple docstring'''
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()
    def _set_gradient_checkpointing( self : str , module , value=False ):
        '''simple docstring'''
        if isinstance(module , BackboneMixin ):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
UPERNET_INPUTS_DOCSTRING = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , UPERNET_START_DOCSTRING , )
class UperNetForSemanticSegmentation( UperNetPreTrainedModel ):
    def __init__( self : List[str] , config ):
        '''simple docstring'''
        super().__init__(config )
        self.backbone = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config , in_channels=self.backbone.channels )
        self.auxiliary_head = UperNetFCNHead(config ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) )
    @replace_return_docstrings(output_type=SemanticSegmenterOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self : int , pixel_values : Optional[torch.Tensor] = None , output_attentions : Optional[bool] = None , output_hidden_states : Optional[bool] = None , labels : Optional[torch.Tensor] = None , return_dict : Optional[bool] = None , ):
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values , output_hidden_states=output_hidden_states , output_attentions=output_attentions )
        features = outputs.feature_maps
        logits = self.decode_head(features )
        logits = nn.functional.interpolate(logits , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=False )
        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features )
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=False )
        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("""The number of labels should be greater than one""" )
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                main_loss = loss_fct(logits , labels )
                auxiliary_loss = loss_fct(auxiliary_logits , labels )
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=loss , logits=logits , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 650
|
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
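# Tiny SentencePiece model shipped with the test fixtures; its 1_008-token vocabulary is
# what the size assertions below expect.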
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        '''simple docstring'''
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 1_008 )
    def test_vocab_size( self ):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer( self ):
        '''simple docstring'''
        return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
    def test_picklable_without_disk( self ):
        '''simple docstring'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
    def test_rust_and_python_full_tokenizers( self ):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    @slow
    def test_tokenization_base_easy_symbols( self ):
        '''simple docstring'''
        symbols = """Hello World!"""
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenization_base_hard_symbols( self ):
        '''simple docstring'''
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
    @slow
    def test_tokenizer_integration( self ):
        '''simple docstring'''
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""facebook/xglm-564M""" , padding=False , )
| 650
| 1
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase__ :
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Tuple=30 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Union[str, Any]=3 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : int=32 , UpperCamelCase__ : Tuple=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : Dict=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Union[str, Any]=2 , ):
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = scope
lowercase_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase_ = (image_size // patch_size) ** 2
lowercase_ = num_patches + 2
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
'''simple docstring'''
lowercase_ = DeiTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = DeiTForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForMaskedImageModeling(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = DeiTForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) = config_and_inputs
lowercase_ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = DeiTModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(UpperCamelCase__ )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=False ):
'''simple docstring'''
lowercase_ = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCamelCase__ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ = False
lowercase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowercase_ = model(**UpperCamelCase__ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCamelCase__ ),
*get_values(UpperCamelCase__ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
lowercase_ = problem_type["""title"""]
lowercase_ = problem_type["""num_labels"""]
lowercase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowercase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if problem_type["num_labels"] > 1:
lowercase_ = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase_ = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCamelCase__ ) as warning_list:
lowercase_ = model(**UpperCamelCase__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DeiTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def UpperCAmelCase_ ( ):
lowercase_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
UpperCamelCase__ )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowercase_ = model(**UpperCamelCase__ )
# verify the logits
lowercase_ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        lowercase_ = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
lowercase_ = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
lowercase_ = inputs.pixel_values.to(UpperCamelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ = model(UpperCamelCase__ )
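
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file above): the fp16 inference
# pattern exercised by the last test, rewritten as a standalone snippet. The
# checkpoint name and image path are taken from the tests above; note that
# `device_map="auto"` requires the `accelerate` package to be installed.
# ---------------------------------------------------------------------------
import torch
from PIL import Image
from transformers import DeiTImageProcessor, DeiTModel

def run_deit_fp16_inference(image_path="./tests/fixtures/tests_samples/COCO/000000039769.png"):
    processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
    model = DeiTModel.from_pretrained(
        "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
    )
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    pixel_values = inputs.pixel_values.to(model.device, dtype=torch.float16)
    with torch.no_grad():  # inference only, so no gradients are needed
        outputs = model(pixel_values)
    return outputs.last_hidden_state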
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
a = None
a = logging.get_logger(__name__)
a = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
},
'tokenizer_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json',
},
}
# TODO(PVP) - this should be removed in Transformers v5
a = {
    't5-small': 512,
    't5-base': 512,
    't5-large': 512,
    't5-3b': 512,
    't5-11b': 512,
}
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : int = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE : str = ['input_ids', 'attention_mask']
__SCREAMING_SNAKE_CASE : Dict = TaTokenizer
__SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self : int , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : Optional[Any]="<pad>" , UpperCamelCase__ : Union[str, Any]=100 , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
lowercase_ = [F'''<extra_id_{i}>''' for i in range(UpperCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
lowercase_ = len(set(filter(lambda UpperCamelCase__ : bool("""extra_id_""" in str(UpperCamelCase__ ) ) , UpperCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , extra_ids=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
lowercase_ = vocab_file
lowercase_ = False if not self.vocab_file else True
lowercase_ = extra_ids
@staticmethod
def UpperCAmelCase__ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : int ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
lowercase_ = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
F''' {pretrained_model_name_or_path} automatically truncating your input to'''
F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , UpperCamelCase__ , )
return max_model_length
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
logger.info(F'''Copy vocab file to {out_vocab_file}''' )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
lowercase_ = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCAmelCase__ ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
lowercase_ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return list(
set(filter(lambda UpperCamelCase__ : bool(re.search(R"""<extra_id_\d+>""" , UpperCamelCase__ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return [self.convert_tokens_to_ids(UpperCamelCase__ ) for token in self.get_sentinel_tokens()]
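
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above), assuming the class
# corresponds to the standard `T5TokenizerFast`: a single sequence gets the
# eos token (`</s>`) appended by `build_inputs_with_special_tokens`, and the
# `extra_ids` sentinel tokens (`<extra_id_0>` ... `<extra_id_99>` by default)
# are exposed through the two helper methods defined at the end of the class.
# ---------------------------------------------------------------------------
from transformers import T5TokenizerFast

tok = T5TokenizerFast.from_pretrained("t5-small")
enc = tok("translate English to German: hello")
assert enc["input_ids"][-1] == tok.eos_token_id  # eos is appended automatically
assert "<extra_id_0>" in tok.get_sentinel_tokens()  # 100 sentinels by default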
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase__ ( __magic_name__ ):
def __init__( self : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
lowercase_ = {}
def UpperCAmelCase__ ( self : Dict , UpperCamelCase__ : Optional[Any] , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : str ):
'''simple docstring'''
lowercase_ = super().add_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
if num_added_tokens == 0:
raise ValueError(
F'''The tokenizer already contains the token {placeholder_token}. Please pass a different'''
""" `placeholder_token` that is not already in the tokenizer.""" )
def UpperCAmelCase__ ( self : Any , UpperCamelCase__ : Optional[Any] , *UpperCamelCase__ : str , UpperCamelCase__ : Optional[int]=1 , **UpperCamelCase__ : int ):
'''simple docstring'''
lowercase_ = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
output.append(UpperCamelCase__ )
else:
lowercase_ = []
for i in range(UpperCamelCase__ ):
lowercase_ = placeholder_token + F'''_{i}'''
self.try_adding_tokens(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
output.append(UpperCamelCase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F'''The tokenizer already has placeholder token {token} that can get confused with'''
                    F''' {placeholder_token}. Please keep placeholder tokens independent.''' )
lowercase_ = output
def UpperCAmelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Tuple=1.0 ):
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase_ = []
for i in range(len(UpperCamelCase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCamelCase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
lowercase_ = self.token_map[placeholder_token]
lowercase_ = tokens[: 1 + int(len(UpperCamelCase__ ) * prop_tokens_to_load )]
if vector_shuffle:
lowercase_ = copy.copy(UpperCamelCase__ )
random.shuffle(UpperCamelCase__ )
lowercase_ = text.replace(UpperCamelCase__ , """ """.join(UpperCamelCase__ ) )
return text
def __call__( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , *UpperCamelCase__ : Any , UpperCamelCase__ : int=False , UpperCamelCase__ : List[str]=1.0 , **UpperCamelCase__ : List[str] ):
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCamelCase__ , vector_shuffle=UpperCamelCase__ , prop_tokens_to_load=UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ , )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : List[Any] , *UpperCamelCase__ : Tuple , UpperCamelCase__ : Any=False , UpperCamelCase__ : int=1.0 , **UpperCamelCase__ : int ):
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCamelCase__ , vector_shuffle=UpperCamelCase__ , prop_tokens_to_load=UpperCamelCase__ ) , *UpperCamelCase__ , **UpperCamelCase__ , )
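
# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above). The names below
# are assumptions based on the diffusers multi-vector textual-inversion
# example, where the class above is called `MultiTokenCLIPTokenizer` and the
# second method is `add_placeholder_tokens`. A placeholder such as
# "<cat-toy>" registered with num_vec_per_token=4 is stored as the sub-tokens
# "<cat-toy>_0" ... "<cat-toy>_3", and every occurrence in a prompt is
# expanded to those sub-tokens before the usual CLIP tokenization runs.
# ---------------------------------------------------------------------------
tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
ids = tokenizer("a photo of <cat-toy>", padding="max_length", truncation=True).input_ids
# the placeholder contributed four token ids to `ids` (one per learned vector)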
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionDiffEditPipeline
__SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
__SCREAMING_SNAKE_CASE : int = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE : Any = frozenset([] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
        lowercase_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCamelCase__ , )
lowercase_ = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
lowercase_ = DDIMInverseScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCamelCase__ , set_alpha_to_zero=UpperCamelCase__ , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase_ = CLIPTextModel(UpperCamelCase__ )
lowercase_ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : Any , UpperCamelCase__ : Any=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple=0 ):
'''simple docstring'''
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowercase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase_ = Image.fromarray(np.uint8(UpperCamelCase__ ) ).convert("""RGB""" )
if str(UpperCamelCase__ ).startswith("""mps""" ):
lowercase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowercase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowercase_ = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe(**UpperCamelCase__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(UpperCamelCase__ )
lowercase_ = self.pipeline_class.from_pretrained(UpperCamelCase__ )
pipe_loaded.to(UpperCamelCase__ )
pipe_loaded.set_progress_bar_config(disable=UpperCamelCase__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(UpperCamelCase__ , UpperCamelCase__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowercase_ = self.get_dummy_inputs(UpperCamelCase__ )
lowercase_ = pipe_loaded(**UpperCamelCase__ )[0]
lowercase_ = np.abs(output - output_loaded ).max()
self.assertLess(UpperCamelCase__ , 1e-4 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_mask_inputs(UpperCamelCase__ )
lowercase_ = pipe.generate_mask(**UpperCamelCase__ )
lowercase_ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowercase_ = np.array([0] * 9 )
lowercase_ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase_ = """cpu"""
lowercase_ = self.get_dummy_components()
lowercase_ = {"""beta_start""": 0.00_085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowercase_ = DPMSolverMultistepScheduler(**UpperCamelCase__ )
lowercase_ = DPMSolverMultistepInverseScheduler(**UpperCamelCase__ )
lowercase_ = self.pipeline_class(**UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = self.get_dummy_inversion_inputs(UpperCamelCase__ )
lowercase_ = pipe.invert(**UpperCamelCase__ ).images
lowercase_ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowercase_ = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
lowercase_ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCamelCase__ , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase__ ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase__ ( cls : Dict ):
'''simple docstring'''
lowercase_ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowercase_ = raw_image.convert("""RGB""" ).resize((768, 768) )
lowercase_ = raw_image
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DDIMScheduler.from_config(pipe.scheduler.config )
lowercase_ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
lowercase_ = torch.manual_seed(0 )
lowercase_ = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa )
lowercase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase_ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowercase_ = """a bowl of fruit"""
lowercase_ = """a bowl of pears"""
lowercase_ = pipe.generate_mask(
image=self.raw_image , source_prompt=UpperCamelCase__ , target_prompt=UpperCamelCase__ , generator=UpperCamelCase__ , )
lowercase_ = pipe.invert(
prompt=UpperCamelCase__ , image=self.raw_image , inpaint_strength=0.7 , generator=UpperCamelCase__ , num_inference_steps=25 , ).latents
lowercase_ = pipe(
prompt=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_latents=UpperCamelCase__ , generator=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowercase_ = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
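
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file above): the three-step
# DiffEdit flow exercised by the slow tests, written as a standalone snippet.
# The checkpoint and image URL come from the tests; everything else follows
# the public diffusers API. Running this needs a CUDA GPU and `accelerate`.
# ---------------------------------------------------------------------------
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

generator = torch.manual_seed(0)
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))
# 1) derive an edit mask from the source/target prompts
mask = pipe.generate_mask(
    image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears", generator=generator
)
# 2) invert the input image into latents under the source prompt
latents = pipe.invert(prompt="a bowl of fruit", image=image, inpaint_strength=0.7, generator=generator).latents
# 3) denoise inside the mask toward the target prompt
edited = pipe(
    prompt="a bowl of pears",
    mask_image=mask,
    image_latents=latents,
    negative_prompt="a bowl of fruit",
    inpaint_strength=0.7,
    generator=generator,
).images[0]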
from string import ascii_uppercase
a = {char: i for i, char in enumerate(ascii_uppercase)}
a = dict(enumerate(ascii_uppercase))
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = len(UpperCAmelCase__ )
lowercase_ = 0
while True:
if x == i:
lowercase_ = 0
if len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ):
break
key += key[i]
i += 1
return key
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = """"""
lowercase_ = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
            lowercase_ = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ = """"""
lowercase_ = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
            lowercase_ = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def UpperCAmelCase_ ( ):
lowercase_ = """THE GERMAN ATTACK"""
lowercase_ = """SECRET"""
lowercase_ = generate_key(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ = cipher_text(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''Encrypted Text = {s}''' )
print(F'''Original Text = {original_text(UpperCAmelCase__ , UpperCAmelCase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
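
# ---------------------------------------------------------------------------
# Illustrative worked example (not part of the module above). The scheme is a
# Vigenere variant: the keyword is repeated to the message length (spaces pass
# through and consume no key letters), encryption computes (plain - key) mod
# 26 per letter, and decryption adds the key back. Note the direction is
# reversed relative to the textbook Vigenere cipher, which adds on encryption.
# ---------------------------------------------------------------------------
from string import ascii_uppercase

CHAR_TO_INT = {c: i for i, c in enumerate(ascii_uppercase)}
INT_TO_CHAR = dict(enumerate(ascii_uppercase))

def encrypt(message: str, key: str) -> str:
    key_stream = key * (len(message) // len(key) + 1)  # repeat the keyword
    out, k = [], 0
    for ch in message:
        if ch == " ":
            out.append(" ")  # spaces pass through and consume no key letter
        else:
            out.append(INT_TO_CHAR[(CHAR_TO_INT[ch] - CHAR_TO_INT[key_stream[k]]) % 26])
            k += 1
    return "".join(out)

# Worked by hand: T-S=1 -> B, H-E=3 -> D, E-C=2 -> C, ... so
# encrypt("THE GERMAN ATTACK", "SECRET") == "BDC PAYUWL JPAIYI"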
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a = logging.get_logger(__name__)
class UpperCamelCase__ ( __magic_name__ ):
__SCREAMING_SNAKE_CASE : str = ['pixel_values']
def __init__( self : List[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 8 , **UpperCamelCase__ : Optional[Any] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_pad
lowercase_ = pad_size
def UpperCAmelCase__ ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None ):
'''simple docstring'''
lowercase_ , lowercase_ = get_image_size(UpperCamelCase__ )
lowercase_ = (old_height // size + 1) * size - old_height
lowercase_ = (old_width // size + 1) * size - old_width
return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ )
def UpperCAmelCase__ ( self : Optional[int] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ):
'''simple docstring'''
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_pad if do_pad is not None else self.do_pad
lowercase_ = pad_size if pad_size is not None else self.pad_size
lowercase_ = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_pad:
lowercase_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
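
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module above) of the pad-to-multiple
# logic in the `pad` method: the image is symmetrically padded on the bottom
# and right until each spatial dimension reaches the *next* multiple of
# `size`. Note that, with this formula, a dimension that is already an exact
# multiple still receives a full extra `size` of padding.
# ---------------------------------------------------------------------------
def pad_amount(old: int, size: int) -> int:
    return (old // size + 1) * size - old

assert pad_amount(17, 8) == 7   # 17 -> 24
assert pad_amount(24, 8) == 8   # 24 -> 32, even though 24 is already a multiple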