| code (string, lengths 87–55.2k) | code_codestyle (int64, 0–349) | style_context (string, lengths 135–49.1k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in
        # https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards
        # breaking, which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
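
A minimal smoke test for the model as reconstructed above. The class and method names follow the restoration in this row; treat them as assumptions rather than the canonical diffusers API, and note that initializing the default (Stable-Diffusion-sized) configuration is memory-hungry:

import jax

controlnet = FlaxControlNetModel()  # default configuration from the dataclass fields above
params = controlnet.init_weights(jax.random.PRNGKey(0))  # randomly initialized parameter tree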
| 101
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                # split the fused qkv projection into separate query/key/value weights
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original AST weights into the 🤗 Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
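
For reference, calling the converter directly is equivalent to running the script with its CLI flags; the dump folder name below is illustrative only:

convert_audio_spectrogram_transformer_checkpoint(
    model_name="ast-finetuned-audioset-10-10-0.4593",  # default --model_name
    pytorch_dump_folder_path="./ast-converted",  # illustrative --pytorch_dump_folder_path
    push_to_hub=False,
)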
| 302
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
def __init__(self , *a_ , **a_ ):
'''simple docstring'''
warnings.warn(
'''The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DPTImageProcessor instead.''' , a_ , )
super().__init__(*a_ , **a_ )
| 102
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
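
A quick usage sketch with the standard 🤗 Transformers loading API (downloads the pretrained tokenizer files on first use):

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoded = tokenizer("Hello world")
print(encoded.input_ids)  # ids wrapped as [CLS] ... [SEP] by build_inputs_with_special_tokens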
| 302
| 0
|
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    r"""
    Build the small example tree used by main():
            1
           / \
          2   3
         / \
        4   5
    """
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[Any]:
    """ZigZag traversal: alternate left-to-right and right-to-left on each level."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 103
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_with_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
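
A standalone sketch of the scheduler under test, using the public diffusers API:

from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)  # prepares scheduler.timesteps for a 50-step sampling loop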
| 302
| 0
|
"""Image processor class for CLIP-style models."""

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL
class CLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a CLIP-style image processor (resize, center crop, rescale, normalize, optional RGB conversion).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
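
A short usage sketch; the class name is reconstructed from the OPENAI_CLIP constants above, so treat it as an assumption:

import numpy as np

processor = CLIPImageProcessor()  # reconstructed name; defaults: shortest edge 224, center crop 224x224
batch = processor(images=np.zeros((3, 256, 256), dtype=np.uint8), return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)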
| 104
|
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk the parent pointers back to the source to print one shortest path."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))  # G->C->A->B->D
    print(g.shortest_path("G"))  # G
    print(g.shortest_path("Foo"))  # raises ValueError: no path from G to Foo
| 302
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->Any:
'''simple docstring'''
a : List[str] = 1
a : List[Any] = 2
while i * i <= n:
a : Any = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def _SCREAMING_SNAKE_CASE ( ) ->Optional[Any]:
'''simple docstring'''
a : Union[str, Any] = 1
a : str = 1
while True:
i += 1
t_num += i
if count_divisors(_lowercase ) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
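
A quick sanity check of count_divisors (28 = 2^2 * 7, so it has (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28):

assert count_divisors(28) == 6  # 28 is the first triangle number with more than five divisors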
| 105
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents
        # will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 302
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
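
A minimal usage sketch following the standard 🤗 Transformers config pattern:

config = XLMRobertaConfig()  # base-sized defaults as declared above
print(config.hidden_size, config.num_hidden_layers)  # 768 12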
| 106
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turn an (xmin, ymin, xmax, ymax) tensor into a dict of int pixel coordinates."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
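
Typical use is through the high-level pipeline factory (standard Transformers API; the image URL is the usual docs example):

from transformers import pipeline

detector = pipeline("object-detection")
predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# each prediction: {"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}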
| 302
| 0
|
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>"

    html_string_2 = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n "

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 107
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
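# Illustrative note (added): with `_LazyModule`, heavy submodules are imported only on
# first attribute access, e.g.
#
#     from transformers import EfficientNetConfig  # resolved lazily via _import_structure
#
# so `import transformers` stays cheap even when optional backends (torch, vision) are absent.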
| 302
| 0
|
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Compute the kinetic energy 0.5 * m * v**2 of a body.

    >>> kinetic_energy(10, 10)
    500.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
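# Worked example (added): for a 10 kg body moving at 10 m/s,
# kinetic_energy(10, 10) == 0.5 * 10 * 10 * 10 == 500.0 joules; the sign of the
# velocity does not matter because abs() is applied twice.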
| 108
|
import random
def _partition(data: list, pivot) -> tuple:
    """Split ``data`` into elements less than, equal to, and greater than ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would sit at position ``index`` if ``items`` were sorted."""
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
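# Usage sketch (added; assumes quick_select as defined above): pick the median of a
# list without fully sorting it.
#
#     items = [2, 4, 5, 7, 899, 54, 32]
#     quick_select(items, len(items) // 2)  # -> 7, since sorted(items)[3] == 7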
| 302
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A: Union[str, Any] = argparse.ArgumentParser(
description=(
"Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
" Distillation"
)
)
parser.add_argument("--model_type", default="bert", choices=["bert"])
parser.add_argument("--model_name", default="bert-base-uncased", type=str)
parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings are copied one-to-one into the student. The student-side key names
    # below follow DistilBERT's layout; they were reconstructed, as the left-hand
    # names were missing from this snippet.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Every other teacher layer (0, 2, 4, 7, 9, 11) initializes one student layer.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
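# Example invocation (added for illustration; the script filename is an assumption):
#
#     python extract_distilbert.py \
#         --model_type bert \
#         --model_name bert-base-uncased \
#         --dump_checkpoint serialization_dir/distilbert_init.pth \
#         --vocab_transform
#
# This writes a DistilBERT-shaped state dict initialized from 6 of BERT's 12 layers.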
| 109
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
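# Usage sketch (added; the checkpoint name is an example, not part of this file):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("audio.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#     # -> [{"score": ..., "label": "Sound of a dog"}, {"score": ..., "label": "Sound of vacuum cleaner"}]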
| 302
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
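# Minimal usage sketch (added; mirrors what the tests above exercise):
#
#     processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#     batch = processor(text=["a photo of a cat"], images=images, return_tensors="pt")
#     # batch holds input_ids, token_type_ids, attention_mask and pixel_values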
| 342
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# NOTE: the model-specific class name was not recoverable from this snippet; a generic
# placeholder name is used instead.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
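# Usage sketch (added; `ImageProcessor` is the placeholder name used in the class above,
# not a public transformers symbol):
#
#     from PIL import Image
#     processor = ImageProcessor(size={"shortest_edge": 256}, crop_size={"height": 224, "width": 224})
#     pixel_values = processor(Image.open("cat.png"), return_tensors="pt").pixel_values
#     # pixel_values.shape -> (1, 3, 224, 224)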
| 302
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
        GPT2Config,
        T5Config,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
        GPT2LMHeadModel,
RobertaForMaskedLM,
        T5ForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 37
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
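# Usage sketch (added): the round trip these tests rely on.
#
#     model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#     model = model.to_bettertransformer()       # swap in fused attention kernels via optimum
#     model = model.reverse_bettertransformer()  # restore the canonical layout before saving
#     model.save_pretrained("checkpoint_dir")    # hypothetical output directory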
| 302
| 0
|
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : int) -> int:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = 1
__UpperCamelCase : int = 2
while i * i <= n:
__UpperCamelCase : List[str] = 0
while n % i == 0:
n //= i
multiplicity += 1
n_divisors *= multiplicity + 1
i += 1
if n > 1:
n_divisors *= 2
return n_divisors
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
'''simple docstring'''
__UpperCamelCase : int = 1
__UpperCamelCase : str = 1
while True:
i += 1
t_num += i
if count_divisors(_SCREAMING_SNAKE_CASE) > 500:
break
return t_num
if __name__ == "__main__":
print(solution())
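# Sanity check (added): 28 = 2**2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6.
# solution() answers Project Euler problem 12: the first triangular number with
# more than 500 divisors.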
| 232
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
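# Usage sketch (added): instantiating a custom ALBERT configuration.
#
#     config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#     # config.embedding_size stays 128: ALBERT factorizes the embedding matrix,
#     # which is why embedding_size is independent of hidden_size.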
| 302
| 0
|
from __future__ import annotations
def average(nums: list) -> float:
    """
    >>> average([3, 6, 9])
    6.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
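# Equivalence check (added for illustration):
#
#     import statistics
#     assert average([3, 6, 9]) == statistics.mean([3, 6, 9]) == 6.0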
| 11
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302
| 0
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )

        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
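# Usage sketch (added; the checkpoint name is an example): the prior pipeline produces
# image embeddings that the Kandinsky 2.2 decoder consumes.
#
#     pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#     image_embeds, negative_image_embeds = pipe_prior("a red cat", guidance_scale=1.0).to_tuple()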
| 280
|
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Append the values of the BST rooted at ``root`` to ``res`` in sorted order."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sort ``arr`` by inserting into a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
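# Complexity note (added): tree sort runs in O(n log n) on average but degrades to O(n^2)
# on already-sorted input, where the unbalanced BST degenerates into a linked list.
# tree_sort([10, 1, 3, 2, 9, 14, 13]) -> [1, 2, 3, 9, 10, 13, 14]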
| 302
| 0
|
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
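# Usage sketch (added): the denoising loop shape these tests exercise; `model` here is a
# hypothetical noise-prediction network.
#
#     scheduler = EulerDiscreteScheduler(num_train_timesteps=1000, beta_schedule="linear")
#     scheduler.set_timesteps(num_inference_steps=10)
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)
#         noise_pred = model(scaled, t)
#         sample = scheduler.step(noise_pred, t, sample).prev_sample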
| 18
|
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
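# For reference, `_LazyModule` above registers itself as the module object so the
# heavy framework-specific submodules are only imported when first accessed. A
# minimal sketch of the same idea using PEP 562 module-level `__getattr__` follows;
# `_LAZY_ATTRS` and the submodule names here are hypothetical, for illustration
# only, and are not the transformers implementation itself.
import importlib

_LAZY_ATTRS = {
    "RobertaConfig": ".configuration_roberta",
    "RobertaModel": ".modeling_roberta",
}


def __getattr__(name):
    # Import the defining submodule on first attribute access, then delegate.
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")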
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()


# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
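# A minimal sketch of the packing performed by ConstantLengthDataset above:
# documents are buffered, tokenized, joined with a concat token, and sliced into
# fixed-size windows. The whitespace "tokenizer" and toy ids below are assumed
# stand-ins for illustration, not the real AutoTokenizer.
def _demo_packing():
    docs = ["a b c", "d e", "f g h i j"]
    vocab = {tok: i for i, tok in enumerate("a b c d e f g h i j".split())}
    concat_token_id, seq_length = -1, 4
    all_token_ids = []
    for doc in docs:
        all_token_ids.extend([vocab[tok] for tok in doc.split()] + [concat_token_id])
    return [
        all_token_ids[i : i + seq_length]
        for i in range(0, len(all_token_ids), seq_length)
        if len(all_token_ids[i : i + seq_length]) == seq_length
    ]


if __name__ == "__main__":
    print(_demo_packing())  # only full windows survive; the short tail is dropped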
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            raise ValueError(
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
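# A short non-interactive usage sketch. The 2x2 key below is an arbitrary valid
# example (det = 7, coprime with 36), not part of the original module; encrypt
# pads the text to a multiple of the key size, so decrypting returns the padded
# upper-case plaintext.
if __name__ == "__main__":
    demo_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
    demo_encrypted = demo_cipher.encrypt("testing hill cipher")
    print(demo_encrypted)
    print(demo_cipher.decrypt(demo_encrypted))  # -> TESTINGHILLCIPHERR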
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
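# Usage sketch: a three-node tree with arbitrary demo values; iterating the
# wrapper yields the sum of all node values computed by depth_first_search.
if __name__ == "__main__":
    demo_root = Node(10)
    demo_root.left = Node(5)
    demo_root.right = Node(-3)
    print(next(iter(BinaryTreeNodeSum(demo_root))))  # 10 + 5 - 3 = 12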
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any directly connected neighbour already carry this color?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base case: every vertex has been assigned a color
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
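# Usage sketch: a 4-cycle as an adjacency matrix (an arbitrary demo graph).
# Two colors suffice for an even cycle, so `color` returns one valid
# assignment; an empty list would mean no coloring exists within the budget.
if __name__ == "__main__":
    cycle_graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(cycle_graph, 2))  # e.g. [0, 1, 0, 1]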
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'prompt',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['image'])
IMAGE_VARIATION_PARAMS = frozenset(
[
'image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['image'])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'prompt',
'image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['prompt', 'image', 'negative_prompt'])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'prompt',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt'])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['image', 'mask_image'])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'example_image',
'image',
'mask_image',
'height',
'width',
'guidance_scale',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['example_image', 'image', 'mask_image'])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['class_labels'])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['class_labels'])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['batch_size'])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'prompt',
'audio_length_in_s',
'guidance_scale',
'negative_prompt',
'prompt_embeds',
'negative_prompt_embeds',
'cross_attention_kwargs',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['prompt', 'negative_prompt'])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['input_tokens'])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['input_tokens'])
from __future__ import annotations
lowerCamelCase__ = """#"""
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] ):
'''simple docstring'''
__a = {}
def UpperCamelCase_ ( self : Optional[Any] , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in text:
if char not in trie:
__a = {}
__a = trie[char]
__a = True
def UpperCamelCase_ ( self : Tuple , __lowercase : str ):
'''simple docstring'''
__a = self._trie
for char in prefix:
if char in trie:
__a = trie[char]
else:
return []
return self._elements(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : dict ):
'''simple docstring'''
__a = []
for c, v in d.items():
__a = [""" """] if c == END else [(c + s) for s in self._elements(__lowercase )]
result.extend(__lowercase )
return tuple(__lowercase )
lowerCamelCase__ = Trie()
lowerCamelCase__ = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
__a = trie.find_word(_SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def lowerCAmelCase__ ( ):
"""simple docstring"""
print(autocomplete_using_trie("""de""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
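# Usage sketch: with the words inserted above, completing the prefix "de"
# yields every match; the trailing space comes from the END marker and flags a
# complete word.
if __name__ == "__main__":
    print(autocomplete_using_trie("de"))  # e.g. ('depart ', 'detergent ', 'deer ', 'deal ')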
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
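# A minimal sketch of the causal attention bias built in __init__ above: fill a
# square matrix with a large negative value, then `triu_(1)` keeps that value
# strictly above the diagonal and zeroes the diagonal and below, so once the
# bias is added to attention scores each position can attend only to itself and
# earlier positions.
if __name__ == "__main__":
    causal_bias = torch.full([4, 4], -10000.0)
    causal_bias.triu_(1)
    print(causal_bias)  # zeros on/below the diagonal, -10000.0 above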
'''simple docstring'''
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def convert_volume(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
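# Usage sketch: conversions go through the base unit, so the result is
# value * from_.from_ * to.to (values here are arbitrary demo inputs).
if __name__ == "__main__":
    print(convert_volume(4, "cubicmeter", "litre"))  # 4000.0
    print(convert_volume(1, "litre", "gallon"))  # ~0.264172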
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
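# Usage sketch: 644 = 2^2 * 7 * 23 has three distinct prime factors, and
# solution(3) returns 644, the first of three consecutive integers that each
# have exactly three distinct prime factors (644, 645, 646).
if __name__ == "__main__":
    print(unique_prime_factors(644))  # {2, 7, 23}
    print(solution(3))  # 644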
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ['the', 'be', 'to', 'of', 'and', 'in', 'that', 'have']


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)
if __name__ == "__main__":
print(f"{solution() = }")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = (
AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase )
if isinstance(__lowercase , __lowercase )
else mask_token
)
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
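
# A minimal usage sketch (the checkpoint name comes from the pretrained map
# above; the two sentences are illustrative):
#
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   ids_a = tokenizer.encode("first segment", add_special_tokens=False)
#   ids_b = tokenizer.encode("second segment", add_special_tokens=False)
#   tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)      # [CLS] A [SEP] B [SEP]
#   tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)  # 0s over A span, 1s over B span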
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` into elements less than, equal to, and
    greater than ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the ``index``-th smallest element of ``items`` in expected linear time."""
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
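
# A quick illustrative check (the sample list is made up): selecting index
# len(items) // 2 yields the median of an odd-length list.
if __name__ == "__main__":
    sample = [9, 2, 7, 4, 5, 1, 8]
    assert quick_select(sample, len(sample) // 2) == sorted(sample)[len(sample) // 2]
    print(quick_select(sample, 0))  # smallest element -> 1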
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
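
    # A standalone sketch of the save/load round trip the tests above assert
    # (the tmp path and step count are illustrative):
    #
    #   scheduler = IPNDMScheduler(num_train_timesteps=1000)
    #   scheduler.set_timesteps(50)
    #   scheduler.save_config("/tmp/ipndm")
    #   reloaded = IPNDMScheduler.from_pretrained("/tmp/ipndm")
    #   reloaded.set_timesteps(50)
    #   # both schedulers should now produce identical step(...).prev_sample outputs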
def min_path_sum(grid: list) -> int:
    """Find the cheapest path from the top-left to the bottom-right of the grid
    when only right and down moves are allowed."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate into ``current_row`` the cheapest way to reach each cell,
    given the already-accumulated costs in ``row_above``."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
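
    # A quick illustrative check (the grid is made up): the cheapest monotone
    # path here is 1 -> 3 -> 1 -> 1 -> 1, total cost 7.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7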
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is given as a dictionary of adjacency lists; the source
        vertex is fixed at construction time."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run breadth first search from the source vertex, filling the parent map."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to ``target_vertex`` as a
        "v1->v2->...->vn" string, or raise ValueError if no path exists."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
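    # Expected output for the adjacency list above: "G->C->A->B->D", then "G";
    # the final call raises ValueError because "Foo" is not in the graph.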
"""simple docstring"""
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string with basic info about the running environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    """Resolve "namespace/model_id" from the token's user or an explicit organization."""
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
lowerCamelCase_ : str = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
F'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
F'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '
"""the directory exists and can be written to."""
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Insert a variant tag (e.g. "fp16") before the file extension."""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download,
    local_files_only, use_auth_token, user_agent, revision, commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
'''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
'''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
'''login`.''' )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
'''this model name. Check the model page at '''
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
''' \nCheckout your internet connection or see how to run the library in'''
''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
'''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
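
# A small illustrative note on the variant helper above: for example,
# _add_variant("diffusion_pytorch_model.bin", "fp16") returns
# "diffusion_pytorch_model.fp16.bin", which is the filename _get_model_file
# requests from the Hub when a variant is loaded via the deprecated revision path.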
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference,
        )
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
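
# A minimal usage sketch (the audio path is illustrative; the checkpoint is the
# tool's default, openai/whisper-base, and the surrounding agents framework is
# assumed to handle loading the audio file into an array):
#
#   tool = SpeechToTextTool()
#   text = tool("path/to/recording.wav")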
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
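
# A minimal usage sketch (the checkpoint and image URL are illustrative):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]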
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
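
# What the lazy structure above buys (a sketch; the names come from
# _import_structure): `from transformers import WavLMConfig` stays cheap, while
# accessing WavLMModel triggers the torch-dependent import only on first use.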
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu",
        num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
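
# A minimal inference sketch mirroring the integration test above (the
# checkpoint name is taken from the test; the image is any PIL image):
#
#   processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits
#   print(model.config.id2label[logits.argmax(-1).item()])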
def is_arithmetic_series(series: list) -> bool:
    """Check whether consecutive differences in the series are all equal."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True

    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """Return the arithmetic mean of the series."""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")

    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
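
    # Quick illustrative checks (the sample lists are made up):
    assert is_arithmetic_series([2, 4, 6]) is True
    assert is_arithmetic_series([2, 4, 7]) is False
    assert arithmetic_mean([2, 4, 6]) == 4.0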
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase__ )
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
def __init__( self : Optional[int] , **__lowercase : Dict ):
'''simple docstring'''
super().__init__(**__lowercase )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ):
'''simple docstring'''
return super().__call__(__lowercase , **__lowercase )
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
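# A minimal usage sketch (added for illustration only; the checkpoint is a public
# CLAP model and the audio path is a hypothetical placeholder):
#
#     from transformers import pipeline
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of a vacuum cleaner"])
#     # -> [{"score": ..., "label": "Sound of a dog"}, ...], sorted by descending score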
| 302
| 0
|
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """simple docstring"""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system"
        )
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
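# Worked example (added for illustration): with voltage = 0 the function solves
# P = V * I for the missing quantity, so
# electric_power(voltage=0, current=2, power=50) == result(name="voltage", value=25.0)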
if __name__ == "__main__":
import doctest
doctest.testmod()
| 342
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class EfficientFormerImageProcessor(BaseImageProcessor):
    # NOTE: the class name was scrambled in this dump; it is assumed here from the
    # signature and defaults, which match the EfficientFormer-style image processor.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
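# A minimal usage sketch (illustration only; the random 224x224 array is a stand-in
# for a real picture, and the class name follows the assumption noted above):
#
#     import numpy as np
#     processor = EfficientFormerImageProcessor()
#     dummy = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
#     batch = processor(images=dummy, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)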
| 302
| 0
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 37
|
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        '''simple docstring'''
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
    def test_error_save_pretrained(self):
        '''simple docstring'''
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
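# Outside of the test suite, the same round-trip looks like this (sketch only;
# requires the `optimum` package, and reuses the tiny checkpoint from the tests above):
#
#     model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#     model = model.to_bettertransformer()       # swap in fused attention kernels
#     ...                                        # run inference
#     model = model.reverse_bettertransformer()  # restore canonical weights before saving
#     model.save_pretrained("checkpoint-dir")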
| 302
| 0
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    '''simple docstring'''

    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 232
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
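# Sketch of how the ONNX config is typically consumed (illustration only; the
# constructor arguments follow the generic OnnxConfig API):
#
#     config = AlbertConfig()
#     onnx_config = AlbertOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])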
| 302
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: str = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
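# Worked example (added for illustration): with the defaults above, cardinality=[0]
# gives embedding_dimension=[min(50, 1 // 2)] = [0], so
# _number_of_features = 0 + 0 + 0 + 0 + 1 * 2 = 2, and
# feature_size = input_size * len(lags_sequence) + 2 = 1 * 7 + 2 = 9.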
| 11
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302
| 0
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
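# Worked example of a single hypothesis evaluation (added for illustration): for the
# first training tuple (5, 2, 3) and the initial parameter_vector [2, 4, 1, 5], the
# hypothesis is 2 + 4*5 + 1*2 + 5*3 = 39, so the initial error on that example is
# 39 - 15 = 24; gradient descent repeatedly shrinks such errors across the training set.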
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent()
| 280
|
class Node:
    def __init__(self, val):
        '''simple docstring'''
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        '''simple docstring'''
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """simple docstring"""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """simple docstring"""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 302
| 0
|
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """simple docstring"""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
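# A minimal usage sketch (grid values are hypothetical): 0 = open cell, 1 = blocked.
# The call counts the simple paths from the top-left to the bottom-right corner;
# with the centre blocked, only the top-right and bottom-left routes remain.
#
#     grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
#     print(depth_first_search(grid, 0, 0, set()))  # 2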
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        '''simple docstring'''
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        '''simple docstring'''
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))
@slow
    def test_inference_semantic_segmentation(self):
        '''simple docstring'''
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        '''simple docstring'''
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 302
| 0
|
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """simple docstring"""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    """simple docstring"""
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """simple docstring"""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """simple docstring"""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
def convert_text_enc_state_dict(text_enc_dict):
    """simple docstring"""
    return text_enc_dict
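# A tiny illustration (hypothetical key) of the relabelling above: the regex built from
# `protected` rewrites the HF name "text_model.encoder.layers.0.layer_norm1.weight" to the
# OpenCLIP name "resblocks.0.ln_1.weight", while the q/k/v capture fuses the three separate
# projection matrices into the single in_proj tensor that the SD v2 text encoder expects.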
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )
    args = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 81
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 302
| 0
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """simple docstring"""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """simple docstring"""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
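# A minimal usage sketch (illustration only; the checkpoint is the public OFA-Sys
# ChineseCLIP model, and `image` stands in for a real PIL image):
#
#     from transformers import ChineseCLIPProcessor
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
#     # inputs now holds input_ids / attention_mask / token_type_ids plus pixel_values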
| 16
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """simple docstring"""
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters
    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        '''simple docstring'''
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        '''simple docstring'''
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        '''simple docstring'''
        return self.key_string[round(num)]
    def check_determinant(self) -> None:
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)
    def process_text(self, text: str) -> str:
        '''simple docstring'''
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        '''simple docstring'''
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted
    def make_decrypt_key(self) -> numpy.ndarray:
        '''simple docstring'''
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))
    def decrypt(self, text: str) -> str:
        '''simple docstring'''
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = int(input("""Enter the order of the encryption key: """ ) )
__a = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(_SCREAMING_SNAKE_CASE ):
__a = [int(_SCREAMING_SNAKE_CASE ) for x in input().split()]
hill_matrix.append(_SCREAMING_SNAKE_CASE )
__a = HillCipher(numpy.array(_SCREAMING_SNAKE_CASE ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
__a = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
__a = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(_SCREAMING_SNAKE_CASE ) )
elif option == "2":
__a = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(_SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
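# A non-interactive sketch of the round trip with an illustrative 2x2 key; the
# expected strings mirror this module's upstream doctests, so treat them as
# indicative rather than authoritative.
example_cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
print(example_cipher.encrypt("testing hill cipher"))  # expected: WHXYJOLM9C6XT085LL
print(example_cipher.decrypt("WHXYJOLM9C6XT085LL"))  # expected: TESTINGHILLCIPHERR (note the padding)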
| 302
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode, batch_size, shuffle=False):
        "Load datasets. Called after prepare data."

        # We test on dev set to compare to benchmarks without having to submit to GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
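# A hedged invocation sketch; the exact flag set also depends on the generic
# lightning_base arguments (model_name_or_path, do_train, etc.), and the file
# name run_pl_glue.py is assumed:
#
#     python run_pl_glue.py \
#         --model_name_or_path bert-base-cased \
#         --task mrpc \
#         --data_dir ./glue_data/MRPC \
#         --max_seq_length 128 \
#         --gpus 1 \
#         --do_train --do_predict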
| 281
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
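# A hedged construction sketch; the keyword values below are illustrative, not
# the tourism-monthly checkpoint's settings.
config = AutoformerConfig(prediction_length=24, num_time_features=2)
print(config.context_length)  # falls back to prediction_length -> 24
print(config.feature_size)    # input_size * len(lags_sequence) + _number_of_features = 7 + 4 = 11 here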
| 302
| 0
|
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is, as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    # A helper that tees everything written to stdout into a file as well.
    # Usage: sys.stdout = Tee(filename)

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))


def get_original_command(max_width=80, full_python_path=False):
    # Reconstruct the command line that launched this script, wrapped for readability.
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)


def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)


def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # enable the hardcoded branch below to debug this function without running the benchmark
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base-cmd",
        default=None,
        type=str,
        required=True,
        help="Base cmd",
    )
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
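# A minimal sketch of the variation expansion performed in main(): each
# --variations argument is split on "|" and the dimensions are crossed.
import itertools

dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
expanded = [" ".join(v).strip() for v in itertools.product(*dims)]
print(expanded)  # 6 combinations, matching the worked example in the header comment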
| 41
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
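# With the lazy structure above, importing the package is cheap; framework-heavy
# submodules are pulled in only on first attribute access. A hedged illustration
# (assumes transformers is installed):
#
#     from transformers import ElectraConfig  # resolves through _import_structure,
#     config = ElectraConfig()                # without importing torch/TF/flax modeling code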
| 302
| 0
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
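# A hedged usage sketch via the high-level pipeline API; the CLAP checkpoint
# name and the audio file are illustrative assumptions.
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
preds = classifier(
    "dog_bark.wav",  # hypothetical local audio file
    candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
)
print(preds)  # e.g. [{'score': ..., 'label': 'Sound of a dog'}, ...]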
| 193
|
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
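# A quick check of the expected behaviour: completions follow dict insertion
# order and each ends with the space produced by the END-of-word marker.
print(autocomplete_using_trie("de"))  # ('depart ', 'detergent ', 'deer ', 'deal ')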
| 302
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
1_08,
2_65,
24,
1_11,
4,
2_58,
1_56,
35,
28,
2_75,
3,
2_59,
2_97,
2_60,
84,
4,
35,
1_10,
44,
8,
2_59,
91,
2_68,
21,
11,
2_09,
2_74,
1_09,
2_66,
2_77,
1_17,
86,
93,
3_15,
2_58,
2_78,
2_58,
2_77,
2_58,
0,
2_58,
2_88,
2_58,
3_19,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
0,
2_58,
2_87,
2_58,
3_15,
2_58,
2_89,
2_58,
2_78,
99,
2_69,
2_66,
2_62,
8,
2_59,
2_41,
4,
2_17,
2_30,
2_68,
2_66,
55,
1_68,
1_06,
75,
1_93,
2_66,
2_23,
27,
49,
26,
2_82,
25,
2_64,
2_99,
19,
26,
0,
2_58,
2_77,
1_17,
86,
93,
1_76,
1_83,
2_70,
11,
2_62,
42,
61,
2_65,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")

        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)

        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"""This is a very simple sentence.""",
"""The quick brown fox jumps over the lazy dog.""",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
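# Outside the test harness, the round trip exercised above looks like this
# sketch; the expected ids match test_tokenization_base_easy_symbols.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok.encode("Hello World!")
print(ids)  # [126, 32, 262, 152, 38, 72, 287]
print(tok.decode(ids))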
| 342
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        # collect all attention processors recursively, indexed by weight name
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
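# A hedged smoke-test sketch with deliberately tiny dimensions (the defaults
# above are 32 heads x 64 dims x 20 layers); shapes follow the forward pass.
import torch

prior = PriorTransformer(num_attention_heads=2, attention_head_dim=8, num_layers=2, embedding_dim=32)
out = prior(
    hidden_states=torch.randn(1, 32),       # noisy image embedding
    timestep=1,
    proj_embedding=torch.randn(1, 32),      # pooled text embedding
    encoder_hidden_states=torch.randn(1, 77, 32),  # per-token text embeddings
)
print(out.predicted_image_embedding.shape)  # torch.Size([1, 32])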
| 302
| 0
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
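# Sanity sketch: 49/98 is the canonical digit-cancelling fraction (49/98 = 4/8
# after incorrectly cancelling the shared 9), and the product of the four
# two-digit cases reduces to 1/100.
print(fraction_list(2))  # ['16/64', '19/95', '26/65', '49/98']
print(solution())        # 100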
| 37
|
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
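# Sanity sketch: the first pair of consecutive integers with two distinct
# prime factors each is (14, 15) = (2*7, 3*5).
print(run(2))  # [14, 15]
# solution() with the default n=4 should yield 134043 (Project Euler 47).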
| 302
| 0
|
from ..utils import DummyObject, requires_backends
# NOTE: upstream, each of these dummies mirrors one public Flax class; the
# original class names were lost to placeholder renaming, so only the shared
# dummy-object pattern is restored here, and the classmethod names
# (`from_config`, `from_pretrained`) follow the usual convention and should be
# treated as an assumption.
class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class lowerCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
requires_backends(self , ["flax"] )
@classmethod
def _lowerCamelCase ( cls :List[str] , *a :str , **a :int ) -> Any:
requires_backends(cls , ["flax"] )
@classmethod
def _lowerCamelCase ( cls :List[Any] , *a :List[str] , **a :int ) -> Dict:
requires_backends(cls , ["flax"] )
class lowerCamelCase__ ( metaclass=lowerCamelCase__):
'''simple docstring'''
_A = ['flax']
def __init__( self :Optional[Any] , *a :List[Any] , **a :List[str] ) -> int:
requires_backends(self , ["flax"] )
@classmethod
def _lowerCamelCase ( cls :List[str] , *a :List[str] , **a :List[Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def _lowerCamelCase ( cls :Optional[Any] , *a :List[str] , **a :List[str] ) -> Dict:
requires_backends(cls , ["flax"] )
class lowerCamelCase__ ( metaclass=lowerCamelCase__):
'''simple docstring'''
_A = ['flax']
def __init__( self :Optional[Any] , *a :Tuple , **a :int ) -> List[str]:
requires_backends(self , ["flax"] )
@classmethod
def _lowerCamelCase ( cls :List[str] , *a :List[str] , **a :Dict ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def _lowerCamelCase ( cls :Union[str, Any] , *a :Union[str, Any] , **a :Optional[int] ) -> Union[str, Any]:
requires_backends(cls , ["flax"] )
class lowerCamelCase__ ( metaclass=lowerCamelCase__):
'''simple docstring'''
_A = ['flax']
def __init__( self :Dict , *a :int , **a :Any ) -> Union[str, Any]:
requires_backends(self , ["flax"] )
@classmethod
def _lowerCamelCase ( cls :Any , *a :List[Any] , **a :Optional[Any] ) -> Optional[Any]:
requires_backends(cls , ["flax"] )
@classmethod
def _lowerCamelCase ( cls :int , *a :Optional[Any] , **a :Union[str, Any] ) -> Dict:
requires_backends(cls , ["flax"] )
| 232
|
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
__a = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__a = 128
elif "12-12" in model_name:
__a = 12
__a = 12
elif "14-14" in model_name:
__a = 14
__a = 14
elif "16-16" in model_name:
__a = 16
__a = 16
else:
raise ValueError("""Model not supported""" )
__a = """huggingface/label-files"""
if "speech-commands" in model_name:
__a = 35
__a = """speech-commands-v2-id2label.json"""
else:
__a = 527
__a = """audioset-id2label.json"""
__a = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) )
__a = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
if "module.v" in name:
__a = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
__a = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
__a = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
__a = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__a = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
__a = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
__a = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__a = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__a = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__a = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__a = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__a = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__a = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
__a = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
__a = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__a = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "qkv" in key:
__a = key.split(""".""" )
__a = int(key_split[3] )
__a = config.hidden_size
if "weight" in key:
__a = val[:dim, :]
__a = val[dim : dim * 2, :]
__a = val[-dim:, :]
else:
__a = val[:dim]
__a = val[dim : dim * 2]
__a = val[-dim:]
else:
__a = val
return orig_state_dict
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
__a = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@torch.no_grad()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str]=False ):
"""simple docstring"""
__a = get_audio_spectrogram_transformer_config(_SCREAMING_SNAKE_CASE )
__a = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
__a = model_name_to_url[model_name]
__a = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
# remove some keys
remove_keys(_SCREAMING_SNAKE_CASE )
# rename some keys
__a = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load 🤗 model
__a = ASTForAudioClassification(_SCREAMING_SNAKE_CASE )
model.eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__a = -4.267_7393 if """speech-commands""" not in model_name else -6.84_5978
__a = 4.568_9974 if """speech-commands""" not in model_name else 5.565_4526
__a = 1024 if """speech-commands""" not in model_name else 128
__a = ASTFeatureExtractor(mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
if "speech-commands" in model_name:
__a = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
__a = dataset[0]["""audio"""]["""array"""]
else:
__a = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
__a , __a = torchaudio.load(_SCREAMING_SNAKE_CASE )
__a = waveform.squeeze().numpy()
__a = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=1_6000 , return_tensors="""pt""" )
# forward pass
__a = model(**_SCREAMING_SNAKE_CASE )
__a = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__a = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__a = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__a = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__a = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__a = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__a = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__a = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
__a = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f"Saving feature extractor to {pytorch_dump_folder_path}" )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f"MIT/{model_name}" )
feature_extractor.push_to_hub(f"MIT/{model_name}" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase__ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 302
| 0
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowerCAmelCase__ ( lowerCamelCase__):
'''simple docstring'''
def __init__( self , __lowerCamelCase = 1_0_1) -> Dict:
_A : int = length
def __len__( self) -> str:
return self.length
def __getitem__( self , __lowerCamelCase) -> Optional[int]:
return i
class lowerCAmelCase__ :
'''simple docstring'''
def __call__( self , __lowerCamelCase) -> Tuple:
return {"input_ids": torch.tensor(__lowercase), "labels": torch.tensor(__lowercase)}
class lowerCAmelCase__ ( nn.Module):
'''simple docstring'''
def __init__( self) -> Tuple:
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_A : str = nn.Linear(1_2_0 , 8_0)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase=None) -> Tuple:
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device), input_ids
else:
return input_ids
class lowerCAmelCase__ ( lowerCamelCase__):
'''simple docstring'''
@require_torch_neuroncore
def _lowerCamelCase ( self) -> int:
_A : Any = F"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
_A : Any = self.get_auto_remove_tmp_dir()
_A : int = F"--output_dir {output_dir}".split()
_A : List[Any] = ["torchrun"] + distributed_args + args
execute_subprocess_async(__lowercase , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class lowerCAmelCase__ ( lowerCamelCase__):
'''simple docstring'''
@require_torch_multi_gpu
def _lowerCamelCase ( self) -> List[str]:
_A : str = F"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
_A : Dict = self.get_auto_remove_tmp_dir()
_A : Any = F"--output_dir {output_dir}".split()
_A : List[Any] = ["torchrun"] + distributed_args + args
execute_subprocess_async(__lowercase , env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
lowerCAmelCase__ = HfArgumentParser((TrainingArguments,))
lowerCAmelCase__ = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_01, 40, 7]:
lowerCAmelCase__ = DummyDataset(dataset_length)
def _UpperCAmelCase (UpperCamelCase__ : EvalPrediction ):
_A : Any = list(range(len(_SCREAMING_SNAKE_CASE ) ) )
_A : int = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"Predictions and/or labels do not match expected results:\n - predictions: "
f"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" )
return {"success": success}
lowerCAmelCase__ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
lowerCAmelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase__ = 2
lowerCAmelCase__ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
lowerCAmelCase__ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
lowerCAmelCase__ = None
| 11
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
def __init__( self : Tuple , __lowercase : Union[str, Any]=None , __lowercase : Optional[int]=None , __lowercase : int=True , __lowercase : Dict=True , __lowercase : str=False , __lowercase : str="[CLS]" , __lowercase : List[Any]="[SEP]" , __lowercase : Any="<unk>" , __lowercase : List[Any]="[SEP]" , __lowercase : List[Any]="<pad>" , __lowercase : Optional[Any]="[CLS]" , __lowercase : List[str]="[MASK]" , **__lowercase : str , ):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = (
AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase , normalized=__lowercase )
if isinstance(__lowercase , __lowercase )
else mask_token
)
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , remove_space=__lowercase , keep_accents=__lowercase , bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , **__lowercase , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Dict , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : str , __lowercase : List[int] , __lowercase : Optional[List[int]] = None ):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : Tuple , __lowercase : str , __lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__lowercase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__a = os.path.join(
__lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ):
copyfile(self.vocab_file , __lowercase )
return (out_vocab_file,)
| 302
| 0
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
UpperCAmelCase : List[Any] = _symbol_database.Default()
UpperCAmelCase : List[str] = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
UpperCAmelCase : List[str] = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Dict = B'''H\003'''
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
UpperCAmelCase : List[Any] = 45
UpperCAmelCase : List[Any] = 15_81
UpperCAmelCase : int = 15_17
UpperCAmelCase : str = 15_70
UpperCAmelCase : Dict = 15_84
UpperCAmelCase : int = 17_93
UpperCAmelCase : Union[str, Any] = 17_95
UpperCAmelCase : Dict = 19_16
UpperCAmelCase : Union[str, Any] = 18_64
UpperCAmelCase : Dict = 19_05
UpperCAmelCase : List[str] = 19_19
UpperCAmelCase : Optional[int] = 24_29
UpperCAmelCase : Union[str, Any] = 22_08
UpperCAmelCase : str = 24_18
UpperCAmelCase : List[str] = 23_23
UpperCAmelCase : List[str] = 24_07
# @@protoc_insertion_point(module_scope)
| 280
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[int] =(IPNDMScheduler,)
__lowerCamelCase : int =(('num_inference_steps', 50),)
def UpperCamelCase_ ( self : str , **__lowercase : Dict ):
'''simple docstring'''
__a = {"""num_train_timesteps""": 1000}
config.update(**__lowercase )
return config
def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residual (must be after setting timesteps)
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
return sample
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowercase , """set_timesteps""" ):
scheduler.set_timesteps(__lowercase )
elif num_inference_steps is not None and not hasattr(__lowercase , """set_timesteps""" ):
__a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__a = dummy_past_residuals[:]
__a = scheduler.timesteps[5]
__a = scheduler.timesteps[6]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.full_loop()
__a = torch.mean(torch.abs(__lowercase ) )
assert abs(result_mean.item() - 2540529 ) < 10
| 302
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''post_extract_proj''': '''feature_projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.upsample.0''': '''encoder.upsample.projection''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Dict ):
"""simple docstring"""
for attribute in key.split("." ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
SCREAMING_SNAKE_CASE_ : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
SCREAMING_SNAKE_CASE_ : Any = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ : Optional[int] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ : Any = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ : int = value
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _snake_case ( lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : List[str] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_ : List[str] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_ : str = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE_ : int = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE_ : List[str] = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
SCREAMING_SNAKE_CASE_ : str = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ : Any = name.split(_SCREAMING_SNAKE_CASE )[0].split("." )[-2]
SCREAMING_SNAKE_CASE_ : str = mapped_key.replace("*" , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ : Any = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ : List[str] = "weight_v"
elif "weight" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = "weight"
elif "bias" in name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = "bias"
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(f'Unused weights: {unused_weights}' )
def _snake_case ( lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = full_name.split("conv_layers." )[-1]
SCREAMING_SNAKE_CASE_ : List[str] = name.split("." )
SCREAMING_SNAKE_CASE_ : Optional[int] = int(items[0] )
SCREAMING_SNAKE_CASE_ : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
SCREAMING_SNAKE_CASE_ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE_ : List[Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = SEWConfig()
if is_finetuned:
SCREAMING_SNAKE_CASE_ : Optional[int] = model.wav_encoder.wav_model.cfg
else:
SCREAMING_SNAKE_CASE_ : Tuple = model.cfg
SCREAMING_SNAKE_CASE_ : Tuple = fs_config.conv_bias
SCREAMING_SNAKE_CASE_ : List[str] = eval(fs_config.conv_feature_layers )
SCREAMING_SNAKE_CASE_ : Optional[int] = [x[0] for x in conv_layers]
SCREAMING_SNAKE_CASE_ : Tuple = [x[1] for x in conv_layers]
SCREAMING_SNAKE_CASE_ : str = [x[2] for x in conv_layers]
SCREAMING_SNAKE_CASE_ : Dict = "gelu"
SCREAMING_SNAKE_CASE_ : Tuple = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0.0
SCREAMING_SNAKE_CASE_ : List[Any] = fs_config.activation_fn.name
SCREAMING_SNAKE_CASE_ : Dict = fs_config.encoder_embed_dim
SCREAMING_SNAKE_CASE_ : Dict = 0.02
SCREAMING_SNAKE_CASE_ : str = fs_config.encoder_ffn_embed_dim
SCREAMING_SNAKE_CASE_ : Tuple = 1E-5
SCREAMING_SNAKE_CASE_ : int = fs_config.encoder_layerdrop
SCREAMING_SNAKE_CASE_ : str = fs_config.encoder_attention_heads
SCREAMING_SNAKE_CASE_ : int = fs_config.conv_pos_groups
SCREAMING_SNAKE_CASE_ : Dict = fs_config.conv_pos
SCREAMING_SNAKE_CASE_ : Any = len(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = fs_config.encoder_layers
SCREAMING_SNAKE_CASE_ : str = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
SCREAMING_SNAKE_CASE_ : List[Any] = model.cfg
SCREAMING_SNAKE_CASE_ : Dict = fs_config.final_dropout
SCREAMING_SNAKE_CASE_ : Tuple = fs_config.layerdrop
SCREAMING_SNAKE_CASE_ : int = fs_config.activation_dropout
SCREAMING_SNAKE_CASE_ : List[str] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
SCREAMING_SNAKE_CASE_ : str = fs_config.attention_dropout
SCREAMING_SNAKE_CASE_ : int = fs_config.dropout_input
SCREAMING_SNAKE_CASE_ : Tuple = fs_config.dropout
SCREAMING_SNAKE_CASE_ : List[Any] = fs_config.mask_channel_length
SCREAMING_SNAKE_CASE_ : str = fs_config.mask_channel_prob
SCREAMING_SNAKE_CASE_ : Dict = fs_config.mask_length
SCREAMING_SNAKE_CASE_ : List[str] = fs_config.mask_prob
SCREAMING_SNAKE_CASE_ : List[Any] = "Wav2Vec2FeatureExtractor"
SCREAMING_SNAKE_CASE_ : int = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Any=True ):
"""simple docstring"""
if is_finetuned:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
SCREAMING_SNAKE_CASE_ : Dict = SEWConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = convert_config(model[0] , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : int = model[0].eval()
SCREAMING_SNAKE_CASE_ : List[str] = True if config.feat_extract_norm == "layer" else False
SCREAMING_SNAKE_CASE_ : str = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE_ : List[str] = Dictionary.load(_SCREAMING_SNAKE_CASE )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE_ : Union[str, Any] = target_dict.pad_index
SCREAMING_SNAKE_CASE_ : Tuple = target_dict.bos_index
SCREAMING_SNAKE_CASE_ : Optional[int] = target_dict.pad_index
SCREAMING_SNAKE_CASE_ : int = target_dict.bos_index
SCREAMING_SNAKE_CASE_ : List[str] = target_dict.eos_index
SCREAMING_SNAKE_CASE_ : int = len(target_dict.symbols )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(_SCREAMING_SNAKE_CASE , "vocab.json" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_SCREAMING_SNAKE_CASE ) )
return
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaCTCTokenizer(
_SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaProcessor(feature_extractor=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : List[str] = SEWForCTC(_SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ : List[str] = SEWModel(_SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(_SCREAMING_SNAKE_CASE )
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__lowerCamelCase : int = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 18
|
from __future__ import annotations
lowerCamelCase__ = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , __lowercase : dict[str, list[str]] , __lowercase : str ):
'''simple docstring'''
__a = graph
# mapping node to its parent in resulting breadth first tree
__a = {}
__a = source_vertex
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = {self.source_vertex}
__a = None
__a = [self.source_vertex] # first in first out queue
while queue:
__a = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(__lowercase )
__a = vertex
queue.append(__lowercase )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : str ):
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
__a = self.parent.get(__lowercase )
if target_vertex_parent is None:
__a = (
F"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
)
raise ValueError(__lowercase )
return self.shortest_path(__lowercase ) + F"->{target_vertex}"
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
| 302
| 0
|
"""simple docstring"""
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCamelCase_ : Optional[int] = {
"""facebook/maskformer-swin-base-ade""": (
"""https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"""
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCamelCase_ : Any = logging.get_logger(__name__)
class __A ( lowerCamelCase__ ):
"""simple docstring"""
__lowerCAmelCase = 'maskformer'
__lowerCAmelCase = {'hidden_size': 'mask_feature_size'}
__lowerCAmelCase = ['resnet', 'swin']
__lowerCAmelCase = ['detr']
def __init__( self , __A = 256 , __A = 256 , __A = 0.1 , __A = False , __A = None , __A = None , __A = 0.02 , __A = 1.0 , __A = 1.0 , __A = 1.0 , __A = 20.0 , __A = None , **__A , ) -> Tuple:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
a =SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , )
if isinstance(__lowercase , __lowercase ):
a =backbone_config.pop('''model_type''' )
a =CONFIG_MAPPING[backbone_model_type]
a =config_class.from_dict(__lowercase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
f'''Supported model types: {",".join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
a =DetrConfig()
else:
# verify that the decoder is supported
a =(
decoder_config.pop('''model_type''' ) if isinstance(__lowercase , __lowercase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
f'''Transformer Decoder {decoder_type} not supported, please use one of'''
f''' {",".join(self.decoders_supported )}''' )
if isinstance(__lowercase , __lowercase ):
a =CONFIG_MAPPING[decoder_type]
a =config_class.from_dict(__lowercase )
a =backbone_config
a =decoder_config
# main feature dimension for the model
a =fpn_feature_size
a =mask_feature_size
# initializer
a =init_std
a =init_xavier_std
# Hungarian matcher && loss
a =cross_entropy_weight
a =dice_weight
a =mask_weight
a =use_auxiliary_loss
a =no_object_weight
a =output_auxiliary_logits
a =self.decoder_config.encoder_attention_heads
a =self.decoder_config.num_hidden_layers
super().__init__(**__lowercase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls , __A , __A , **__A ) -> Any:
return cls(
backbone_config=__lowercase , decoder_config=__lowercase , **__lowercase , )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
a =copy.deepcopy(self.__dict__ )
a =self.backbone_config.to_dict()
a =self.decoder_config.to_dict()
a =self.__class__.model_type
return output
| 81
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Tuple =KandinskyVaaPriorPipeline
__lowerCamelCase : Union[str, Any] =['prompt']
__lowerCamelCase : Any =['prompt', 'negative_prompt']
__lowerCamelCase : List[str] =[
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
__lowerCamelCase : List[Any] =False
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
return 32
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim
@property
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return 100
@property
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__lowercase )
@property
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
__a = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 12,
"""embedding_dim""": self.text_embedder_hidden_size,
"""num_layers""": 1,
}
__a = PriorTransformer(**__lowercase )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
__a = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
__a = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
__a = CLIPVisionModelWithProjection(__lowercase )
return model
@property
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
__a = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowercase , do_normalize=__lowercase , do_resize=__lowercase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
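# A minimal, self-contained sketch (not part of the original tests) of the
# device-aware seeding pattern used by `get_dummy_inputs` above: CUDA
# generators must live on the target device, while MPS does not accept a
# device-bound generator, so the global seed is used instead.
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


# e.g. noise = torch.randn(1, 4, 8, 8, generator=make_generator("cpu"))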
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = botoa.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2) )
        policy_document = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2), )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = botoa.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )
    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
    eca_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        eca_instance_type = _ask_options(
            eca_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        eca_instance_type = _ask_field(eca_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )
    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        eca_instance_type=eca_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
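# Illustrative sketch (not part of the accelerate source above): a minimal
# stand-in for the imported `_ask_field` helper, showing the
# prompt/convert/default/retry pattern the questionnaire relies on. The name
# `_ask_field_sketch` and its exact behavior are assumptions, not the real
# accelerate implementation.
def _ask_field_sketch(question, convert_value=str, default=None, error_message=None):
    while True:
        answer = input(question)
        if not answer and default is not None:
            return default
        try:
            return convert_value(answer)
        except Exception:
            if error_message is not None:
                print(error_message)


# e.g. num_machines = _ask_field_sketch("How many machines do you want use? [1]: ", int, default=1)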
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()) )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.idalabel[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
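# Illustrative usage sketch (not part of the pipeline source above). The
# checkpoint name is an assumption - any object-detection model on the Hub
# should work the same way.
#
# from transformers import pipeline
#
# detector = pipeline("object-detection", model="facebook/detr-resnet-50")
# predictions = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
# # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]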
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_efficientnet""": [
"""EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientNetConfig""",
"""EfficientNetOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
"""EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientNetForImageClassification""",
"""EfficientNetModel""",
"""EfficientNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
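# Illustrative sketch (not part of the module above): the `_LazyModule`
# mechanism in miniature, via PEP 562 module-level `__getattr__`, so heavy
# submodules are imported only on first attribute access. The attribute map
# below is a hypothetical example; it is commented out so it cannot clash
# with the real lazy-module setup.
#
# import importlib
#
# _LAZY_ATTRS = {"EfficientNetModel": ".modeling_efficientnet"}
#
# def __getattr__(name):
#     if name in _LAZY_ATTRS:
#         module = importlib.import_module(_LAZY_ATTRS[name], __package__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")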
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self):
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self):
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
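# Quick usage example (not part of the original module):
#
# >>> stack = LinkedStack()
# >>> stack.push(1)
# >>> stack.push(2)
# >>> stack.peek()
# 2
# >>> stack.pop()
# 2
# >>> len(stack)
# 1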
import random
def _partition(data: list, pivot) -> tuple:
    """Partition *data* into elements less than, equal to and greater than *pivot*."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the *index*-th smallest element of *items* in expected O(n) time."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
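# Usage example (not part of the original module): selecting order statistics
# without sorting the whole list.
if __name__ == "__main__":
    data = [7, 3, 1, 9, 5]
    assert quick_select(data, 0) == 1  # smallest element
    assert quick_select(data, 2) == 5  # median
    assert quick_select(data, 4) == 9  # largest element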
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True):
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T):
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the source vertex
            # as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as it's first adjacent vertex also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as it's first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self):
        return pformat(self.adj_list)
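# Usage example (not part of the original module): `add_edge` returns `self`,
# so edges can be chained; `repr` shows the adjacency dictionary.
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=False)
    graph.add_edge(0, 1).add_edge(1, 2)
    print(graph)  # {0: [1], 1: [0, 2], 2: [1]}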
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
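# Illustrative usage sketch (not part of the pipeline source above). The CLAP
# checkpoint name mirrors the one used in the transformers documentation and
# is an assumption here.
#
# from transformers import pipeline
#
# classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# # -> [{"score": 0.99, "label": "Sound of a dog"}, ...]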
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
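# Illustrative usage sketch (not part of the test file above): loading the
# Flax checkpoint directly, mirroring what the slow test exercises.
#
# import numpy as np
# from transformers import FlaxRobertaModel
#
# model = FlaxRobertaModel.from_pretrained("roberta-base")
# outputs = model(np.ones((1, 1), dtype="i4"))
# print(outputs.last_hidden_state.shape)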
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, crop_size: Dict[str, int] = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
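# Illustrative sketch (not part of the class above): the rescale -> normalize
# -> channel-first chain that `preprocess` applies, written out with plain
# numpy for a single HWC uint8 image. The mean/std values are the
# IMAGENET_DEFAULT constants this processor assumes.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    image = rng.integers(0, 256, size=(224, 224, 3), dtype=np.uint8)
    pixels = image.astype(np.float32) * (1 / 255)  # rescale
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
    pixels = (pixels - mean) / std  # normalize
    pixels = pixels.transpose(2, 0, 1)  # HWC -> CHW (ChannelDimension.FIRST)
    assert pixels.shape == (3, 224, 224)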
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class lowerCAmelCase_(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, crop_size: Dict[str, int] = None, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: Optional[bool] = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: Optional[bool] = None, crop_size: Dict[str, int] = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)
        if not is_batched(images):
            images = [images]
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
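# Illustrative usage sketch (not part of the test file above): the
# BetterTransformer round trip the tests exercise, on any supported model.
#
# model = AutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
# model = model.to_bettertransformer()       # swap in fused attention kernels
# ...                                         # run inference
# model = model.reverse_bettertransformer()  # restore canonical weights before saving
# model.save_pretrained("my-checkpoint")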
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_framework_from_local_checkpoint(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_framework_from_available(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
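# Illustrative usage sketch (not part of the test file above):
#
# from transformers.onnx import FeaturesManager
#
# framework = FeaturesManager.determine_framework("bert-base-cased")
# print(framework)  # "pt" when torch is installed, otherwise "tf"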
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
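# Illustrative usage sketch (not part of the config file above):
#
# config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
# print(config.model_type)  # "albert"
# print(AlbertOnnxConfig(config).inputs.keys())  # input_ids, attention_mask, token_type_ids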
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json',
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
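# Quick sanity check (not part of the config file above): with the defaults,
# a 224x224 image split into 16x16 patches yields (224 / 16) ** 2 = 196
# patch tokens, plus one [CLS] token, for a sequence length of 197.
#
# config = ViTConfig()
# num_patches = (config.image_size // config.patch_size) ** 2
# assert num_patches == 196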
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates)
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1)
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1)
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'conversational': TFLEDForConditionalGeneration,
            'feature-extraction': TFLEDModel,
            'summarization': TFLEDForConditionalGeneration,
            'text2text-generation': TFLEDForConditionalGeneration,
            'translation': TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"])
        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length])

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length])
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices])

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.')
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)
    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384')
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
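# A standalone sketch (added for illustration, not part of the test file) of the
# round-up formula the tester uses for `encoder_seq_length`: L + (w - L % w) % w
# pads L up to the next multiple of the attention window w, which mirrors the
# padding LED's encoder applies internally.
def _round_up_to_window(seq_length: int, attention_window: int) -> int:
    return seq_length + (attention_window - seq_length % attention_window) % attention_window


assert _round_up_to_window(7, 4) == 8  # 7 is padded up to the next multiple of 4
assert _round_up_to_window(8, 4) == 8  # already a multiple, no padding added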
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a BST from the input, then read it back in sorted order.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
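# Added note: each insert walks O(height) nodes, so tree_sort is O(n log n) on
# average but degrades to O(n^2) when the input is already sorted and the tree
# becomes a linked list, e.g. tree_sort([1, 2, 3, 4]) walks the full right
# spine on every insert.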
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, hidden_act="swish", conv_kernel_size=3, output_stride=32, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None, width_multiplier=0.25, ffn_dropout=0.0, attn_dropout=0.0):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTVaModel,
            'image-classification': MobileViTVaForImageClassification,
            'image-segmentation': MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor])
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ], device=torch_device)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = 'esm'

    def __init__(self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026, initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute", use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False, esmfold_config=None, vocab_list=None, **kwargs) -> None:
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('No esmfold_config supplied for folding model, using default values.')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, 'use_esm_attn_map', False):
            raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')

    def to_dict(self) -> dict:
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def _A ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
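# Usage sketch (added illustration; assumes the transformers context this
# configuration file ships in):
#
#   config = EsmConfig(vocab_size=33, is_folding_model=True)
#   len(config.vocab_list)               # 33 symbols from get_default_vocab_list() above
#   config.to_dict()["esmfold_config"]   # nested configs are serialized recursively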
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time(process_name: list, arrival_time: list, burst_time: list, no_of_process: int) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished processes.
    # 0 means the process is not finished yet; 1 means it is finished.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Find the first unfinished process (processes are sorted by arrival time).
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time


def calculate_waiting_time(process_name: list, turn_around_time: list, burst_time: list, no_of_process: int) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
lowerCAmelCase_ = 5
lowerCAmelCase_ = ['A', 'B', 'C', 'D', 'E']
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = [1, 2, 3, 4, 5]
lowerCAmelCase_ = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCAmelCase_ = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
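# Worked example (added): HRRN always runs the ready process with the highest
# response ratio (waiting_time + burst_time) / burst_time. With the inputs
# above, at t=1 only A (burst 1) has arrived, so A runs first; when it finishes
# at t=2, B's ratio is (0 + 2) / 2 = 1.0 and B runs next, and so on.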
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray):
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]
    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
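# Usage sketch (added; the 2x2 key below is just an example that passes
# check_determinant, not a value from the original file):
#
#   cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   token = cipher.encrypt("TESTING HILL CIPHER")
#   cipher.decrypt(token)   # round-trips to "TESTINGHILLCIPHERR" (padded, spaces dropped)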
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : Optional[Any] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
snake_case : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class AutoformerConfig(PretrainedConfig):
    model_type = 'autoformer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: bool = True, num_time_features: int = 0, num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, activation_function: str = "gelu", dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache: bool = True, is_encoder_decoder=True, label_length: int = 10, moving_average: int = 25, autocorrelation_factor: int = 3, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`")
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`")
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
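# Usage sketch (added illustration):
#
#   config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   config.feature_size   # input_size * len(lags_sequence) + _number_of_features
#   config.d_model        # 64 by default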
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1_000),
                            (height * bbox[1] / 1_000),
                            (width * bbox[2] / 1_000),
                            (height * bbox[3] / 1_000),
                        ]))

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]
            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]
        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
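# Usage sketch (added; the model id is an example hub checkpoint, not from this file):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]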
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    def __init__(self):
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.")
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class PhrasalConstraint(Constraint):
    def __init__(self, token_ids):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
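# Minimal sketch (added) of how a PhrasalConstraint is stepped during beam search:
#
#   constraint = PhrasalConstraint([5, 9, 2])
#   while not constraint.completed:
#       token = constraint.advance()                   # next token the phrase forces
#       stepped, completed, reset = constraint.update(token)
#   constraint.remaining()                             # -> 0 once the phrase is emitted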
class DisjunctiveTrie:
    def __init__(self, nested_token_ids, no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])
        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )
        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
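
# Illustrative sketch, not part of the original module: the trie indexes all
# candidate sequences and exposes the continuations of a partial match.
def _demo_disjunctive_trie():
    trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4], [1, 5]])
    assert trie.next_tokens([1]) == [2, 5]        # both branches are still open
    assert trie.next_tokens([1, 2]) == [3, 4]
    assert trie.reached_leaf([1, 5])              # a complete candidate sequence
    assert trie.count_leaves(trie.trie) == 3      # one leaf per candidate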
class DisjunctiveConstraint(Constraint):
    def __init__(self, nested_token_ids):
        super().__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )
        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
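
# Illustrative sketch, not part of the original module: a disjunctive
# constraint is satisfied as soon as any one of its candidate sequences
# is generated. Token ids are arbitrary.
def _demo_disjunctive_constraint():
    constraint = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
    assert constraint.advance() == [1]            # both candidates start with 1
    constraint.update(1)
    assert sorted(constraint.advance()) == [2, 4]
    constraint.update(4)                          # takes the short branch [1, 4]
    assert constraint.completed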
class ConstraintListState:
    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints is fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped
        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of
            # our list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        # we never modify self.constraints, so the copied state starts from the initialization state.
        new_state = ConstraintListState(self.constraints)
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
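
# Illustrative sketch, not part of the original module: tracking two
# constraints across generated tokens. Token ids are arbitrary.
def _demo_constraint_list_state():
    state = ConstraintListState([PhrasalConstraint([1, 2]), PhrasalConstraint([3])])
    state.add(1)  # steps the first constraint
    state.add(2)  # completes it
    state.add(3)  # completes the second constraint
    assert state.completed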
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
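
# Illustrative check, not in the original script: with the six words inserted
# above, the "de" prefix completes to the following tuple (a trailing space
# marks a complete word; ordering follows dict insertion order):
#
#     autocomplete_using_trie("de")
#     # -> ('depart ', 'detergent ', 'deer ', 'deal ')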
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path, mode="rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo, mode="rb", target_protocol=None, target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
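
# Illustrative usage sketch (an assumption, not part of this module): once one
# of these filesystems is registered with fsspec, a compressed file can be read
# through a chained URL, where the part after "::" locates the archive, e.g.:
#
#     import fsspec
#
#     with fsspec.open("gzip://file.txt::/tmp/file.txt.gz", "rb") as f:
#         data = f.read()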
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embedding_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embedding_act_fn)
        self.proj_in = nn.Linear(embedding_dim, inner_dim)
        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)
        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))
        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )
        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")
        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)
        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)
        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)
        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")
        hidden_states = self.proj_in(hidden_states)
        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
        additional_embeds = []
        additional_embeddings_len = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]
        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]
        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]
        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)
        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )
        hidden_states = hidden_states + positional_embeddings
        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)
        hidden_states = self.norm_out(hidden_states)
        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]
        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
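
# Minimal usage sketch (illustrative only; the default config is far larger,
# and all sizes below are made up for a quick shape check):
#
#     prior = PriorTransformer(
#         num_attention_heads=2, attention_head_dim=4, num_layers=2,
#         embedding_dim=8, num_embeddings=7, additional_embeddings=4,
#     )
#     sample = torch.randn(2, 8)        # noisy image-embedding latents
#     proj = torch.randn(2, 8)          # pooled text embedding
#     enc = torch.randn(2, 7, 8)        # per-token text states
#     out = prior(sample, timestep=1, proj_embedding=proj, encoder_hidden_states=enc)
#     assert out.predicted_image_embedding.shape == (2, 8)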
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
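
# Illustrative note (an assumption, not part of this file): with the
# _LazyModule in place, a top-level import such as
#
#     from transformers.models.mask2former import Mask2FormerConfig
#
# resolves the name on first attribute access, so configuration_mask2former
# is only actually imported at that point rather than at package import time.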
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of distinct prime factors of n by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors of a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check equality of all elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Return the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4):
    """Return the first member of the first run of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
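
# Sanity check (illustrative, not in the original file): the first run of two
# consecutive integers with two distinct prime factors each is 14 = 2 * 7 and
# 15 = 3 * 5, and the first run of three is 644, 645, 646, so:
#
#     run(2)  # -> [14, 15]
#     run(3)  # -> [644, 645, 646]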
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
        super().test_output(expected_slice)
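
# Illustrative usage note (the test-file path is an assumption): a single block
# family can be selected with pytest's keyword filter, e.g.
#
#     pytest tests/models/test_unet_blocks.py -k "DownBlock2D"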
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
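
# Illustrative example (not in the original script): the renaming is purely
# string-based, e.g.
#
#     rename_key("module.v.blocks.0.attn.proj.weight")
#     # -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"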
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights into our Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
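
# Illustrative invocation (the script filename is an assumption):
#
#     python convert_audio_spectrogram_transformer_original_to_pytorch.py \
#         --model_name ast-finetuned-audioset-10-10-0.4593 \
#         --pytorch_dump_folder_path ./ast-converted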
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files
FILE_CONTENT = """\
Text data.
Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
    return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
    return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
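
# Illustrative sketch (not part of the original fixtures): any test in the
# suite can consume one of these session-scoped fixtures simply by naming it
# as a parameter, e.g.:
#
#     def test_csv_roundtrip(csv_path):
#         ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#         assert ds.num_rows == 4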
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
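

# Hedged example (not from the original file): a minimal pytest test showing how
# the fixtures above are typically consumed; it only relies on the `jsonl_path`
# fixture and the module-level DATA defined earlier in this file.
def test_jsonl_fixture_roundtrip(jsonl_path):
    with open(jsonl_path, encoding="utf-8") as f:
        rows = [json.loads(line) for line in f]
    assert rows == DATA  # the fixture writes DATA one JSON object per line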
| 11
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : List[Any] =VOCAB_FILES_NAMES
__lowerCamelCase : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any =AlbertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
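

# Hedged usage sketch (not part of the original module): the standard way to load
# this fast tokenizer; assumes the "albert-base-v2" checkpoint is reachable.
if __name__ == "__main__":
    tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    encoded = tokenizer("Hello world", "Second segment")
    # token_type_ids follow create_token_type_ids_from_sequences above:
    # 0 for "[CLS] seq0 [SEP]", 1 for "seq1 [SEP]"
    print(encoded["input_ids"], encoded["token_type_ids"])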
| 302
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    """A fast BLOOM tokenizer (backed by HuggingFace's tokenizers library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
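

# Hedged usage sketch (not part of the original module): checkpoint name taken
# from the pretrained map above; requires network access.
if __name__ == "__main__":
    tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
    print(tok("Hello BLOOM")["input_ids"])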
| 280
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
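

# Hedged sketch (not part of the original test file): the same sampling pattern
# outside the test harness; the constant residual stands in for a trained model.
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.ones(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = 0.1 * sample  # placeholder for model(sample, t)
        sample = scheduler.step(residual, t, sample).prev_sample
    print(sample.shape)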
| 302
| 0
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one example's content and record its char-to-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
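
# Hedged invocation sketch (flags come from arguments.PretokenizationArguments,
# used in the script body above):
#   python pretokenizing.py --tokenizer_dir <tokenizer-repo> --dataset_name <dataset> \
#       --tokenized_data_repo <output-repo> --num_workers 8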
| 18
|
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Run a BFS from the source vertex, filling in the parent mapping."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to target_vertex as 'A->B->C'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 302
| 0
|
"""simple docstring"""
from __future__ import annotations
lowerCamelCase_ : List[str] = """#"""
class __A :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
a ={}
def SCREAMING_SNAKE_CASE ( self , __A ) -> List[Any]:
a =self._trie
for char in text:
if char not in trie:
a ={}
a =trie[char]
a =True
def SCREAMING_SNAKE_CASE ( self , __A ) -> str:
a =self._trie
for char in prefix:
if char in trie:
a =trie[char]
else:
return []
return self._elements(__lowercase )
def SCREAMING_SNAKE_CASE ( self , __A ) -> str:
a =[]
for c, v in d.items():
a =[''' '''] if c == END else [(c + s) for s in self._elements(__lowercase )]
result.extend(__lowercase )
return tuple(__lowercase )
lowerCamelCase_ : int = Trie()
lowerCamelCase_ : List[str] = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""")
for word in words:
trie.insert_word(word)
def _A ( lowercase ):
"""simple docstring"""
a =trie.find_word(_SCREAMING_SNAKE_CASE )
return tuple(string + word for word in suffixes )
def _A ( ):
"""simple docstring"""
print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
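
# Expected output of main() above, given the six inserted words (each completion
# carries a trailing space because _elements maps END to " "):
#   ('depart ', 'detergent ', 'deer ', 'deal ')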
| 81
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14)
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224)
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0)

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
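
# Note on the pattern above (descriptive comment, not from the original file): at
# import time only `_import_structure` is built, so heavy torch/vision imports are
# deferred; `_LazyModule` swaps itself into sys.modules and resolves names like
# `EfficientNetModel` only on first attribute access.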
| 16
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
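

# Hedged usage sketch (not part of the original module): this pipeline is
# normally built via transformers.pipeline; model name and image URL are examples.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="facebook/detr-resnet-50")
    print(detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9))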
| 302
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
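
# Hedged usage sketch (not part of the original module; label names illustrative):
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification().align_with_features(features)
#   template.column_mapping  # {"text": "text", "labels": "labels"}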
| 281
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 302
| 0
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
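
# Hedged invocation sketch (flags come from the dataclasses above):
#   python run_mlm_wwm.py --model_name_or_path bert-base-chinese \
#       --train_file train.txt --train_ref_file ref.txt \
#       --do_train --do_eval --output_dir ./out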
| 41
|
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition data into (less, equal, greater) relative to pivot."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """Return the element that would be at items[index] if items were sorted."""
    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
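

# Hedged usage example (not part of the original file): selecting the median.
if __name__ == "__main__":
    items = [2, 4, 5, 7, 899, 54, 32]
    print(quick_select(items, len(items) // 2))  # -> 7, the median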
| 302
| 0
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name: str) -> FocalNetConfig:
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name: str) -> str:
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
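
# Usage sketch (added for illustration; the script file name below is an assumption):
#
#     python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny
#
# The dumped folder can then be reloaded like any transformers checkpoint with
# FocalNetForImageClassification.from_pretrained("./focalnet-tiny").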
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: predicts the most likely label for an
    audio input from a set of `candidate_labels`, using an audio-text model such as CLAP.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
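

# Usage sketch (added for illustration; the model id and audio file are assumptions,
# not part of the original module):
#
#     from transformers import pipeline
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     preds = classifier(
#         "dog_barking.wav",
#         candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"],
#     )
#     # -> [{"score": ..., "label": "Sound of a dog"}, ...]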
def prefix_function(input_string: str) -> list:
    """Computes the prefix function (the KMP failure function) of `input_string`."""
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Returns the length of the longest proper prefix of a prefix of `input_str`
    that is also its suffix (the maximum of the prefix function)."""
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
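

# Worked example (added for illustration, not part of the original module):
# for "aabaaab" the prefix function is [0, 1, 0, 1, 2, 2, 3], so the longest
# border over all prefixes has length 3.
if __name__ == "__main__":
    assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
    assert longest_prefix("aabaaab") == 3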
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# NOTE: the original class name was lost in this copy; "EfficientFormerImageProcessor"
# is a best-guess restoration based on the defaults (IMAGENET_DEFAULT_MEAN/STD, 224px).
class EfficientFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size_dict, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
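

# Usage sketch (added for illustration; the class name above is a best-guess
# restoration, see the note on the class definition):
#
#     import numpy as np
#     processor = EfficientFormerImageProcessor()
#     dummy = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
#     batch = processor(images=dummy, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)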
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class GradientAccumulatorTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()

        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
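

# Minimal usage sketch of GradientAccumulator (added for illustration; the
# gradient-producing helper is hypothetical): accumulate per-micro-batch
# gradients, apply them once, then reset before the next effective batch.
#
#     accumulator = GradientAccumulator()
#     for micro_batch_grads in compute_micro_batch_gradients():  # hypothetical helper
#         accumulator(micro_batch_grads)
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()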
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
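

# Usage sketch (added for illustration; mirrors what the tests above exercise):
#
#     model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#     model = model.to_bettertransformer()       # fused attention kernels via the optimum package
#     ...                                        # run inference
#     model = model.reverse_bettertransformer()  # restore canonical weights
#     model.save_pretrained("./t5-canonical")    # saving is only allowed after reversing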
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
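
# Example invocation (added for illustration; file paths are assumptions):
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert_base/model.ckpt-best \
#         --albert_config_file ./albert_base/albert_config.json \
#         --pytorch_dump_path ./albert_base/pytorch_model.bin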
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
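

# Usage sketch (added for illustration): the defaults above match albert-xxlarge;
# a base-sized configuration just shrinks the transformer dimensions, e.g.
#
#     config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)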
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
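

# Usage sketch (added for illustration):
#
#     tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     encoding = tokenizer("ELECTRA pre-trains by detecting replaced tokens.")
#     print(encoding.input_ids)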
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
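
# Note (added for illustration): with the _LazyModule pattern above, importing the
# package is cheap; the heavy torch/tf submodules only load on first attribute access:
#
#     import transformers.models.blip as blip
#     processor_cls = blip.BlipProcessor  # this line triggers the actual submodule import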
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
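
# Example invocation (added for illustration; the script file name is an assumption):
#
#     python convert_trocr_unilm_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten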
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    """Recursive in-order traversal, appending values to `res`."""
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    """Sorts `arr` by building a BST and reading it back in order."""
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
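
# Added note: tree sort runs in O(n log n) on average, but an already-sorted input
# degenerates the unbalanced BST into a linked list, giving O(n^2) behavior.
if __name__ == "__main__":
    assert tree_sort([10, 1, 3, 2, 9, 14, 13]) == [1, 2, 3, 9, 10, 13, 14]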
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[Any] =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__lowerCamelCase : Any =(
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Dict =False
__lowerCamelCase : Optional[Any] =False
__lowerCamelCase : int =False
__lowerCamelCase : Any =False
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__lowercase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_logits, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 302
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
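

# Note on the helper above: `nn.Linear(vocab_size, emb_size)` is constructed with
# the dimensions in embedding order, but its weight is then overwritten wholesale
# with the (vocab_size, emb_size) embedding matrix, so the resulting layer maps
# emb_size-dim hidden states to vocab_size logits (y = x @ W.T). Illustrative,
# hypothetical shapes (not read from a real checkpoint):
#   emb = nn.Embedding(256, 32)
#   lm_head = make_linear_from_emb(emb)  # weight shape (256, 32): maps 32 -> 256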
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
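

# Quick illustration of the mapping above on a made-up fairseq key (not taken
# from a real checkpoint): an expert weight is rerouted into the HF layout.
#   rename_fairseq_keys({"layers.3.moe_layer.experts.0.fc1.weight": w}, expert_idx=7)
#   == {"layers.3.ffn.experts.expert_7.fc1.weight": w}
# (the ".fc1." rule is skipped here because the key still contains "experts")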
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
lowerCamelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowerCamelCase_ : Optional[int] = parser.parse_args()
lowerCamelCase_ , lowerCamelCase_ : List[Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
lowerCamelCase_ : Any = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowerCamelCase_ : str = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 81
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 302
| 0
|
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
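

# Rough sketch of the mechanism showcased below (sizes are hypothetical): the
# decorator re-invokes the wrapped function, halving `batch_size` whenever an
# out-of-memory error escapes, e.g. 128 -> 64 -> 32, until a run succeeds.
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def probe(batch_size):
#         ...  # build dataloaders/model for `batch_size` and train
#     probe()  # called with no arguments; the decorator supplies batch_size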
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 16
|
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
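

# For example: greatest_common_divisor(4, 8) == 4 and greatest_common_divisor(20, 25) == 5.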
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
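

# Illustrative round-trip check (a hypothetical helper, not part of the original
# module). The 2x2 key below has determinant 7, which is coprime with 36, so it
# is a valid Hill key; process_text pads the plaintext with its last character.
def _example_round_trip() -> bool:
    cipher = HillCipher(numpy.array([[2, 5], [1, 6]]))
    encrypted = cipher.encrypt("testing hill cipher")  # "TESTINGHILLCIPHER" + padding
    return cipher.decrypt(encrypted) == "TESTINGHILLCIPHERR"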
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 302
| 0
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
    from transformers import (
        TFBertModel,
        TFCLIPVisionModel,
        TFDeiTModel,
        TFRobertaModel,
        TFVisionTextDualEncoderModel,
        TFViTModel,
        VisionTextDualEncoderConfig,
    )

if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
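

# e.g. to_atuple(224) == (224, 224), while an iterable such as (224, 224) is returned unchanged.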
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0].numpy()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-5)
    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
    def test_vision_text_dual_encoder_model(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs)

    def test_model_from_pretrained_configs(self):
        inputs = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs)

    def test_save_load(self):
        inputs = self.prepare_config_and_inputs()
        self.check_save_load(**inputs)

    def test_vision_text_output_attention(self):
        inputs = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)
        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class TFCLIPVisionBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class TFVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 281
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}


class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
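
# Worked example with the defaults above: cardinality falls back to [0], so
# embedding_dimension becomes [min(50, (0 + 1) // 2)] == [0]; _number_of_features
# is then 0 + 0 + 0 + 0 + 1 * 2 == 2, and feature_size == 1 * 7 + 2 == 9.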
| 302
| 0
|
import numpy as np


def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # Positive entries pass through unchanged; negative entries become
    # alpha * (exp(x) - 1), which saturates at -alpha.
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
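

# Illustrative values (alpha = 0.3): exponential_linear_unit(np.array([2.3, -2.0]), 0.3)
# ≈ array([ 2.3, -0.25939942]).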
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 302
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
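
# Special-token layout produced by the two methods above (sequence-pair case):
#   tokens:          [CLS] A ... [SEP] B ... [SEP]
#   token_type_ids:    0   0 ...   0    1 ...   1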
| 193
|
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 302
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-06, qkv_bias=True, **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        super().__init__(**kwargs)
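
# Worked example, assuming the usual non-overlapping tubelet embedding: with the
# defaults above, a clip yields (32 / 2) * (224 / 16) * (224 / 16) = 16 * 14 * 14
# = 3136 tubelet tokens, each projected to hidden_size = 768.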
| 342
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = {}
def fn_recursive_add_processors(__lowercase : str , __lowercase : torch.nn.Module , __lowercase : Dict[str, AttentionProcessor] ):
if hasattr(__lowercase , """set_processor""" ):
__a = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , __lowercase , __lowercase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__lowercase , __lowercase , __lowercase )
return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Sets the processor used to compute attention, either one processor for all layers or a dict per weight name."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        """Disables custom attention processors and falls back to the default attention implementation."""
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        """Denoises `hidden_states` for `timestep`, conditioned on `proj_embedding` and optional encoder states."""
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)

        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(additional_embeds, dim=1)

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        # un-standardize the predicted embeddings back to the CLIP mean/std statistics
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
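
# A minimal usage sketch for the prior above, assuming it is instantiated via diffusers'
# `PriorTransformer` (the tiny config below is illustrative only, not a real checkpoint):
def _demo_prior_forward():
    import torch

    from diffusers import PriorTransformer

    prior = PriorTransformer(num_attention_heads=2, attention_head_dim=4, num_layers=2, embedding_dim=8)
    latents = torch.randn(1, 8)
    proj_embedding = torch.randn(1, 8)
    encoder_hidden_states = torch.randn(1, 77, 8)  # 77 matches the default `num_embeddings`
    out = prior(latents, timestep=1, proj_embedding=proj_embedding, encoder_hidden_states=encoder_hidden_states)
    assert out.predicted_image_embedding.shape == (1, 8)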
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
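
# How the lazy structure above behaves in practice (an illustrative sketch, not part of
# the module itself): importing the package stays cheap, and the torch-backed submodule
# is only materialized on attribute access, e.g.
#
#     from transformers.models.upernet import UperNetConfig                   # no torch yet
#     from transformers.models.upernet import UperNetForSemanticSegmentation  # loads modeling_upernet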
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the set of unique prime factors of ``n`` by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check if all elements of a list are identical."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of ``n`` consecutive integers with ``n`` distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first of ``n`` consecutive integers to have ``n`` distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
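    # Worked check, grounded in the problem statement: 14 = 2 * 7 and 15 = 3 * 5 are
    # the first two consecutive integers with two distinct prime factors each, and
    # 644 = 2^2 * 7 * 23, 645 = 3 * 5 * 43, 646 = 2 * 17 * 19 start the first run of three.
    assert unique_prime_factors(644) == {2, 7, 23}
    assert solution(2) == 14
    assert solution(3) == 644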
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    """Rename original DALL-E encoder keys to the Hugging Face FlavaImageCodebook layout."""
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
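
# A worked example of the key upgrade above (a minimal sketch with a toy tensor, not
# real DALL-E weights): "group_1." gains a ".group" segment and ".w" becomes ".weight".
def _demo_upgrade_key():
    upgraded = upgrade_state_dict({"blocks.group_1.conv.w": torch.zeros(1)})
    assert list(upgraded) == ["blocks.group_1.group.conv.weight"]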
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """
    Copy/paste/tweak the original DALL-E encoder weights into the transformers design.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    """Map an original AST checkpoint key onto the transformers naming scheme."""
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
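
# A quick trace of the renaming above (a sketch; the key is a representative original
# checkpoint name chosen for illustration, and the expected value was derived by hand
# from the replacement rules):
def _demo_rename_key():
    original = "module.v.blocks.0.attn.proj.weight"
    expected = "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"
    assert rename_key(original) == expected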
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
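
# The fused qkv weight split above takes a projection of shape (3 * dim, dim) and
# slices it into equal query/key/value thirds. A toy illustration (dim = 2,
# arbitrary values, not real checkpoint weights):
def _demo_qkv_split():
    fused = torch.arange(12.0).reshape(6, 2)  # (3 * dim, dim) with dim = 2
    q, k, v = fused[:2, :], fused[2:4, :], fused[-2:, :]
    assert q.shape == k.shape == v.shape == (2, 2)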
def remove_keys(state_dict):
    """Drop classifier-head keys that have no counterpart in the converted model."""
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)
    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
def _lowerCamelCase ( self) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(__lowercase , "vocab.txt"))
_A : int = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="bert" , use_fast=__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(__lowercase , "vocab.json"))
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(__lowercase , "merges.txt"))
_A : Dict = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="gpt2" , use_fast=__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
@require_tokenizers
def _lowerCamelCase ( self) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.txt" , os.path.join(__lowercase , "vocab.txt"))
_A : Tuple = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="bert")
self.assertIsInstance(__lowercase , __lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("./tests/fixtures/vocab.json" , os.path.join(__lowercase , "vocab.json"))
shutil.copy("./tests/fixtures/merges.txt" , os.path.join(__lowercase , "merges.txt"))
_A : List[str] = AutoTokenizer.from_pretrained(__lowercase , tokenizer_type="gpt2")
self.assertIsInstance(__lowercase , __lowercase)
def _lowerCamelCase ( self) -> Optional[Any]:
with pytest.raises(__lowercase):
AutoTokenizer.from_pretrained("./" , tokenizer_type="xxx")
@require_tokenizers
def _lowerCamelCase ( self) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_A : int = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast))
if isinstance(__lowercase , __lowercase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowercase)
else:
self.assertEqual(tokenizer.do_lower_case , __lowercase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
@require_tokenizers
def _lowerCamelCase ( self) -> Union[str, Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowercase , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
_A : Union[str, Any] = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")
def _lowerCamelCase ( self) -> Optional[Any]:
_A : List[Any] = TOKENIZER_MAPPING.values()
_A : Optional[int] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowercase)
@require_tokenizers
def _lowerCamelCase ( self) -> List[str]:
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=__lowercase) , __lowercase)
self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased") , __lowercase)
@require_tokenizers
def _lowerCamelCase ( self) -> List[Any]:
_A : Optional[int] = AutoTokenizer.from_pretrained("distilbert-base-uncased" , do_lower_case=__lowercase)
_A : Any = "Hello, world. How are you?"
_A : Tuple = tokenizer.tokenize(__lowercase)
self.assertEqual("[UNK]" , tokens[0])
_A : Any = AutoTokenizer.from_pretrained("microsoft/mpnet-base" , do_lower_case=__lowercase)
_A : int = tokenizer.tokenize(__lowercase)
self.assertEqual("[UNK]" , tokens[0])
@require_tokenizers
def _lowerCamelCase ( self) -> Optional[int]:
_A : List[str] = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
self.assertEqual(type(__lowercase) , __lowercase)
self.assertEqual(tokenizer.model_max_length , 5_1_2)
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0)
self.assertEqual(tokenizer.unk_token , "[UNK]")
self.assertEqual(tokenizer.padding_side , "right")
self.assertEqual(tokenizer.truncation_side , "right")
def _lowerCamelCase ( self) -> str:
_A : List[str] = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : Optional[int] = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 1_2)
def _lowerCamelCase ( self) -> str:
_A : str = AutoTokenizer.from_pretrained("ctrl")
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__lowercase , __lowercase)
def _lowerCamelCase ( self) -> Tuple:
_A : List[Any] = get_tokenizer_config("bert-base-cased")
_A : Optional[int] = config.pop("_commit_hash" , __lowercase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(__lowercase , {"do_lower_case": False})
# This model does not have a tokenizer_config so we get back an empty dict.
_A : List[str] = get_tokenizer_config(__lowercase)
self.assertDictEqual(__lowercase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_A : List[Any] = AutoTokenizer.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : Optional[int] = get_tokenizer_config(__lowercase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer")
def _lowerCamelCase ( self) -> Dict:
try:
AutoConfig.register("custom" , __lowercase)
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase):
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
_A : List[Any] = CustomTokenizer.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : Tuple = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def _lowerCamelCase ( self) -> Union[str, Any]:
try:
AutoConfig.register("custom" , __lowercase)
# Can register in two steps
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__lowercase , slow_tokenizer_class=__lowercase , fast_tokenizer_class=__lowercase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase):
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase)
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_A : int = BertTokenizerFast.from_pretrained(__lowercase)
bert_tokenizer.save_pretrained(__lowercase)
_A : List[Any] = CustomTokenizerFast.from_pretrained(__lowercase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : str = AutoTokenizer.from_pretrained(__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
_A : Union[str, Any] = AutoTokenizer.from_pretrained(__lowercase , use_fast=__lowercase)
self.assertIsInstance(__lowercase , __lowercase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self) -> Dict:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__lowercase):
_A : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
# If remote code is disabled, we can't load this config.
with self.assertRaises(__lowercase):
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase)
_A : Optional[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : int = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast")
# Test we can also load the slow version
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowercase)
_A : str = AutoTokenizer.from_pretrained(__lowercase , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer")
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer")
@require_tokenizers
def _lowerCamelCase ( self) -> Dict:
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register("custom" , __lowercase)
AutoTokenizer.register(__lowercase , slow_tokenizer_class=__lowercase)
AutoTokenizer.register(__lowercase , fast_tokenizer_class=__lowercase)
# If remote code is not set, the default is to use local
_A : List[Any] = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertFalse(tokenizer.special_attribute_present)
_A : int = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
_A : Dict = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertFalse(tokenizer.special_attribute_present)
_A : Union[str, Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
_A : Tuple = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
self.assertTrue(tokenizer.special_attribute_present)
_A : List[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self) -> Union[str, Any]:
_A : Dict = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast")
# Test we can also load the slow version
_A : Optional[Any] = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=__lowercase , use_fast=__lowercase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer")
def _lowerCamelCase ( self) -> List[Any]:
with self.assertRaisesRegex(
__lowercase , "bert-base is not a local folder and is not a valid model identifier"):
_A : str = AutoTokenizer.from_pretrained("bert-base")
def _lowerCamelCase ( self) -> Dict:
with self.assertRaisesRegex(
__lowercase , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
_A : Dict = AutoTokenizer.from_pretrained(__lowercase , revision="aaaaaa")
def _lowerCamelCase ( self) -> Dict:
_A : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
with RequestCounter() as counter:
_A : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
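
# The registration flow exercised repeatedly above, condensed into a minimal sketch
# (reusing the test fixtures imported at the top of this file):
def _demo_register_custom_tokenizer():
    AutoConfig.register("custom", CustomConfig)
    try:
        AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
        assert TOKENIZER_MAPPING[CustomConfig] == (CustomTokenizer, None)
    finally:
        # always clean up the global mappings so other tests are unaffected
        del CONFIG_MAPPING._extra_content["custom"]
        del TOKENIZER_MAPPING._extra_content[CustomConfig]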
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
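
# A worked example of the two helpers above (requires downloading the checkpoint; the
# ids 10, 11 and 20 are arbitrary placeholders, not meaningful Albert vocabulary ids):
# a pair of sequences is laid out as `[CLS] A [SEP] B [SEP]`, with token_type_ids 0
# over the first segment and 1 over the second.
def _demo_albert_special_tokens():
    tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
    ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20])
    assert ids == [tokenizer.cls_token_id, 10, 11, tokenizer.sep_token_id, 20, tokenizer.sep_token_id]
    assert tokenizer.create_token_type_ids_from_sequences([10, 11], [20]) == [0, 0, 0, 0, 1, 1]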
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # saving a model that is still in BetterTransformer form must fail until it is reversed
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
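
# A common usage pattern for this module (an illustrative sketch; the checkpoint id is
# just an example): schedulers are interchangeable at runtime because they all share
# `SchedulerMixin.from_config`, e.g.
#
#     from diffusers import DiffusionPipeline
#     from diffusers.schedulers import DPMSolverMultistepScheduler
#
#     pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)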
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """The graph is given as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Traverse the graph breadth-first from the source, recording each vertex's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the shortest path from the source to ``target_vertex`` as an arrow-joined string."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTVaConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self , config_class=MobileViTVaConfig , has_text_modality=False )
    def test_config( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE ( self ) -> str:
pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """simple docstring"""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head( self ):
        model = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_logits , atol=1e-4 ) )
    @slow
    def test_inference_semantic_segmentation( self ):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def test_post_processing_semantic_segmentation( self ):
        model = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
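# Illustrative usage sketch (not part of the test module). It shows how the
# classes exercised above are typically combined for inference; the checkpoint
# and fixture path are the ones the integration tests already use.
#
#   from PIL import Image
#   image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#   model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#   logits = model(**processor(images=image, return_tensors="pt")).logits
#   predicted_class = logits.argmax(-1).item()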
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyVaaPriorPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaPriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
        '''simple docstring'''
        return 32

    @property
    def time_input_dim( self ):
        '''simple docstring'''
        return 32

    @property
    def block_out_channels_0( self ):
        '''simple docstring'''
        return self.time_input_dim

    @property
    def time_embed_dim( self ):
        '''simple docstring'''
        return self.time_input_dim * 4

    @property
    def cross_attention_dim( self ):
        '''simple docstring'''
        return 100

    @property
    def dummy_tokenizer( self ):
        '''simple docstring'''
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer

    @property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(config )
@property
    def dummy_prior( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model_kwargs = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
@property
    def dummy_image_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        model = CLIPVisionModelWithProjection(config )
        return model
@property
    def dummy_image_processor( self ):
        '''simple docstring'''
        image_processor = CLIPImageProcessor(
            crop_size=224 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
        return image_processor
    def get_dummy_components( self ):
        '''simple docstring'''
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=True , clip_sample_range=10.0 , )
        components = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""scheduler""": scheduler,
"""image_processor""": image_processor,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_prior( self ):
        '''simple docstring'''
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def test_inference_batch_single_identical( self ):
        '''simple docstring'''
        test_max_difference = torch_device == """cpu"""
        relax_max_difference = True
        test_mean_pixel_difference = False
        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )

    @skip_mps
    def test_attention_slicing_forward_pass( self ):
        '''simple docstring'''
        test_max_difference = torch_device == """cpu"""
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference , test_mean_pixel_difference=test_mean_pixel_difference , )
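# Illustrative usage sketch (not part of the test module): the prior pipeline
# turns a text prompt into CLIP image embeddings that a Kandinsky decoder
# pipeline then consumes. The checkpoint name below is an assumption for
# illustration only.
#
#   pipe = KandinskyVaaPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
#   out = pipe("horse", num_inference_steps=25, guidance_scale=4.0)
#   image_embeds, negative_image_embeds = out.image_embeds, out.negative_image_embeds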
"""simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 50),)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {'''num_train_timesteps''': 1_000}
        config.update(**kwargs )
        return config
    def check_over_configs( self , time_step=0 , **kwargs ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**kwargs )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained( self ):
        """simple docstring"""
        pass
    def check_over_forward( self , time_step=0 , **kwargs ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , **config ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
    def test_step_shape( self ):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler , '''set_timesteps''' ):
                scheduler.set_timesteps(num_inference_steps )
            elif num_inference_steps is not None and not hasattr(scheduler , '''set_timesteps''' ):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
            output_0 = scheduler.step(residual , time_step_0 , sample , **kwargs ).prev_sample
            output_1 = scheduler.step(residual , time_step_1 , sample , **kwargs ).prev_sample
            self.assertEqual(output_0.shape , sample.shape )
            self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps , time_step=None )

    def test_inference_steps( self ):
        """simple docstring"""
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=None )

    def test_full_loop_no_noise( self ):
        """simple docstring"""
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 2_540_529 ) < 10
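# Illustrative sketch of the denoising loop the tests above exercise (a
# stripped-down `full_loop`; `model(sample, t)` is assumed to be any callable
# that predicts the residual for the current sample):
#
#   scheduler = IPNDMScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(10)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)
#       sample = scheduler.step(residual, t, sample).prev_sample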
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = Dict[str, Any]
lowerCamelCase__ = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline( Pipeline ):
def __init__( self : Tuple , *__lowercase : Tuple , **__lowercase : Optional[int] ):
'''simple docstring'''
super().__init__(*__lowercase , **__lowercase )
if self.framework == "tf":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
requires_backends(self , """vision""" )
        self.check_model_type(
            dict(list(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() ) + list(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["""threshold"""] = kwargs["""threshold"""]
        return {}, {}, postprocess_kwargs
def __call__( self : List[Any] , *__lowercase : Any , **__lowercase : Tuple ):
'''simple docstring'''
return super().__call__(*__lowercase , **__lowercase )
    def preprocess( self , image ):
        '''simple docstring'''
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="""pt""" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" )
        inputs["""target_size"""] = target_size
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        target_size = model_inputs.pop("""target_size""" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"""target_size""": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["""bbox"""] = model_inputs["""bbox"""]
        return model_outputs
    def postprocess( self , model_outputs , threshold=0.9 ):
        '''simple docstring'''
        target_size = model_outputs["""target_size"""]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )

            scores, classes = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
            keys = ["""score""", """label""", """box"""]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["""scores"""]
            labels = raw_annotation["""labels"""]
            boxes = raw_annotation["""boxes"""]
            raw_annotation["""scores"""] = scores.tolist()
            raw_annotation["""labels"""] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["""boxes"""] = [self._get_bounding_box(box ) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["""score""", """label""", """box"""]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] )
            ]
        return annotation
    def _get_bounding_box( self , box: "torch.Tensor" ):
        '''simple docstring'''
        if self.framework != "pt":
            raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
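# Illustrative usage sketch (not part of the module). Any object-detection
# checkpoint works; "facebook/detr-resnet-50" is only an example:
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]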
import math
import os
import sys
def read_file_binary(file_path: str ) -> str:
    '''simple docstring'''
    result = ""
    try:
        with open(file_path , "rb" ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'''{dat:08b}'''
            result += curr_byte
        return result
    except OSError:
        print("File not accessible" )
        sys.exit()
def add_key_to_lexicon(lexicon: dict[str, str] , curr_string: str , index: int , last_match_id: str ) -> None:
    '''simple docstring'''
    lexicon.pop(curr_string )
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index ).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index )[2:]
def compress_data(data_bits: str ) -> str:
    '''simple docstring'''
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id )
        index += 1
        curr_string = ""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str , compressed: str ) -> str:
    '''simple docstring'''
    file_length = os.path.getsize(source_path )
    file_length_binary = bin(file_length )[2:]
    length_length = len(file_length_binary )
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str , to_write: str ) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path , "wb" ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("10000000" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder="big" ) )
    except OSError:
        print("File not accessible" )
        sys.exit()
def compress(source_path: str , destination_path: str ) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path )
    compressed = compress_data(data_bits )
    compressed = add_file_length(source_path , compressed )
    write_file_binary(destination_path , compressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
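# Illustrative sketch (not part of the original module): the core encoder can
# be exercised on an in-memory bit string instead of a file, e.g.
#
#   sample_bits = "0010101100"          # hypothetical input bits
#   print(compress_data(sample_bits))   # LZW-coded bit string; the lexicon
#                                       # grows by one entry per emitted code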
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    """configuration_efficientnet""": [
        """EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """EfficientNetConfig""",
        """EfficientNetOnnxConfig""",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""image_processing_efficientnet"""] = ["""EfficientNetImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_efficientnet"""] = [
        """EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """EfficientNetForImageClassification""",
        """EfficientNetModel""",
        """EfficientNetPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
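# With the lazy module in place, importing this package stays cheap: the heavy
# torch/vision submodules are only imported when one of their attributes is
# first accessed. Illustrative sketch (hypothetical session):
#
#   import transformers.models.efficientnet as efficientnet  # no torch import yet
#   model_cls = efficientnet.EfficientNetModel                # resolved lazily by _LazyModule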
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowercase ( unittest.TestCase ):
@property
    def dummy_uncond_unet( self ):
torch.manual_seed(0 )
lowerCamelCase__ : int = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
    def test_inference( self ):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=generator ).images
        generator = torch.manual_seed(0 )
        image_from_tuple = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=generator , return_dict=False )[
            0
        ]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowercase ( unittest.TestCase ):
    def test_sde_ve_pipeline( self ):
        model_id = """google/ncsnpp-church-256"""
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id )
        sde_ve = ScoreSdeVePipeline(unet=model , scheduler=scheduler )
        sde_ve.to(torch_device )
        sde_ve.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=generator ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
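# Illustrative usage sketch (not part of the test module). The slow test above
# assembles the pipeline from its parts; loading it in one call is assumed to
# behave the same way:
#
#   sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = sde_ve(num_inference_steps=10, output_type="numpy").images[0]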
import random
def _partition(data: list , pivot ) -> tuple:
    """simple docstring"""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select(items: list , index: int ):
    """simple docstring"""
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller, equal, larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
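# Illustrative usage (not part of the original module): quick_select(data, k)
# returns the k-th smallest element (0-indexed), so the median of an
# odd-length list is the element at index len(data) // 2.
if __name__ == "__main__":
    data = [7, 1, 5, 3, 9]
    print(quick_select(data, 0))               # smallest element -> 1
    print(quick_select(data, len(data) // 2))  # median           -> 5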
from math import ceil, sqrt
def solution(limit: int = 1_000_000 ) -> int:
    answer = 0
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(F"{solution() = }")
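# The function counts square laminae (hollow square frames) built from at most
# `limit` tiles: for each outer edge length it derives the smallest legal hole
# width and counts every admissible hole size. Smaller limits are handy for
# hand-checking, e.g.:
#
#   print(solution(100))  # laminae buildable from up to one hundred tiles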
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotAudioClassificationPipeline( Pipeline ):
def __init__( self : Optional[int] , **__lowercase : Dict ):
'''simple docstring'''
super().__init__(**__lowercase )
if self.framework != "pt":
raise ValueError(F"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self : str , __lowercase : Union[np.ndarray, bytes, str] , **__lowercase : int ):
'''simple docstring'''
return super().__call__(__lowercase , **__lowercase )
    def _sanitize_parameters( self , **kwargs ):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["""candidate_labels"""] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}
    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        '''simple docstring'''
        if isinstance(audio , str ):
            if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , """rb""" ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError("""We expect a numpy ndarray as input""" )
        if len(audio.shape ) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
        inputs["""candidate_labels"""] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["""text_inputs"""] = [text_inputs]
        return inputs
    def _forward( self , model_inputs ):
        '''simple docstring'''
        candidate_labels = model_inputs.pop("""candidate_labels""" )
        text_inputs = model_inputs.pop("""text_inputs""" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess( self , model_outputs ):
        '''simple docstring'''
        candidate_labels = model_outputs.pop("""candidate_labels""" )
        logits = model_outputs["""logits"""][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError("""`tf` framework not supported.""" )
        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
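# Illustrative usage sketch (not part of the module). The checkpoint name is
# an assumption; any CLAP-style zero-shot audio model should work:
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification",
#                         model="laion/clap-htsat-unfused")
#   classifier("dog_bark.wav",
#              candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])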
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
    def get_dummy_components( self ):
torch.manual_seed(0 )
__magic_name__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__lowercase , )
__magic_name__ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__magic_name__ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__magic_name__ : Dict = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , )
__magic_name__ : List[str] = ClapTextModelWithProjection(__lowercase )
__magic_name__ : Optional[int] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
__magic_name__ : List[Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__lowercase , )
__magic_name__ : Union[str, Any] = SpeechTaHifiGan(__lowercase )
__magic_name__ : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """A hammer hitting a wooden surface""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
        }
        return inputs
    def test_audioldm_ddim( self ):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = audioldm_pipe(**inputs )
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
        assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : str = self.get_dummy_components()
__magic_name__ : str = AudioLDMPipeline(**__lowercase )
__magic_name__ : Tuple = audioldm_pipe.to(__lowercase )
__magic_name__ : Tuple = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__magic_name__ : Optional[Any] = self.get_dummy_inputs(__lowercase )
__magic_name__ : Union[str, Any] = 3 * [inputs["""prompt"""]]
# forward
__magic_name__ : Any = audioldm_pipe(**__lowercase )
__magic_name__ : Dict = output.audios[0]
__magic_name__ : Union[str, Any] = self.get_dummy_inputs(__lowercase )
__magic_name__ : Any = 3 * [inputs.pop("""prompt""" )]
__magic_name__ : Tuple = audioldm_pipe.tokenizer(
__lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowercase , return_tensors="""pt""" , )
__magic_name__ : str = text_inputs["""input_ids"""].to(__lowercase )
__magic_name__ : List[str] = audioldm_pipe.text_encoder(
__lowercase , )
__magic_name__ : Optional[int] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__magic_name__ : Union[str, Any] = F.normalize(__lowercase , dim=-1 )
__magic_name__ : Dict = prompt_embeds
# forward
__magic_name__ : Tuple = audioldm_pipe(**__lowercase )
__magic_name__ : Dict = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def __magic_name__ ( self ) -> Any:
__magic_name__ : Dict = self.get_dummy_components()
__magic_name__ : List[Any] = AudioLDMPipeline(**__lowercase )
__magic_name__ : Optional[int] = audioldm_pipe.to(__lowercase )
__magic_name__ : str = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__magic_name__ : Tuple = self.get_dummy_inputs(__lowercase )
__magic_name__ : Any = 3 * ["""this is a negative prompt"""]
__magic_name__ : int = negative_prompt
__magic_name__ : Optional[int] = 3 * [inputs["""prompt"""]]
# forward
__magic_name__ : Any = audioldm_pipe(**__lowercase )
__magic_name__ : List[Any] = output.audios[0]
__magic_name__ : List[Any] = self.get_dummy_inputs(__lowercase )
__magic_name__ : str = 3 * [inputs.pop("""prompt""" )]
__magic_name__ : List[str] = []
for p in [prompt, negative_prompt]:
__magic_name__ : Optional[int] = audioldm_pipe.tokenizer(
__lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowercase , return_tensors="""pt""" , )
__magic_name__ : Dict = text_inputs["""input_ids"""].to(__lowercase )
__magic_name__ : int = audioldm_pipe.text_encoder(
__lowercase , )
__magic_name__ : int = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__magic_name__ : int = F.normalize(__lowercase , dim=-1 )
embeds.append(__lowercase )
__magic_name__ ,__magic_name__ : List[Any] = embeds
# forward
__magic_name__ : int = audioldm_pipe(**__lowercase )
__magic_name__ : List[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def __magic_name__ ( self ) -> str:
__magic_name__ : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Optional[int] = self.get_dummy_components()
__magic_name__ : Dict = PNDMScheduler(skip_prk_steps=__lowercase )
__magic_name__ : List[Any] = AudioLDMPipeline(**__lowercase )
__magic_name__ : Union[str, Any] = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__magic_name__ : Optional[int] = self.get_dummy_inputs(__lowercase )
__magic_name__ : List[str] = """egg cracking"""
__magic_name__ : Tuple = audioldm_pipe(**__lowercase , negative_prompt=__lowercase )
__magic_name__ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) == 2_56
__magic_name__ : Union[str, Any] = audio[:10]
__magic_name__ : Optional[int] = np.array(
[-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def __magic_name__ ( self ) -> Any:
__magic_name__ : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Optional[int] = self.get_dummy_components()
__magic_name__ : Union[str, Any] = PNDMScheduler(skip_prk_steps=__lowercase )
__magic_name__ : str = AudioLDMPipeline(**__lowercase )
__magic_name__ : Optional[int] = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__magic_name__ : Optional[int] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
__magic_name__ : List[str] = audioldm_pipe(__lowercase , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__magic_name__ : Tuple = 2
__magic_name__ : Dict = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
__magic_name__ : Dict = 2
__magic_name__ : Optional[Any] = audioldm_pipe(__lowercase , num_inference_steps=2 , num_waveforms_per_prompt=__lowercase ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
__magic_name__ : int = 2
__magic_name__ : Optional[Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__lowercase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Optional[int] = self.get_dummy_components()
__magic_name__ : List[str] = AudioLDMPipeline(**__lowercase )
__magic_name__ : Optional[Any] = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__magic_name__ : Optional[int] = audioldm_pipe.vocoder.config.sampling_rate
__magic_name__ : Tuple = self.get_dummy_inputs(__lowercase )
__magic_name__ : Optional[int] = audioldm_pipe(audio_length_in_s=0.0_1_6 , **__lowercase )
__magic_name__ : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) / vocoder_sampling_rate == 0.0_1_6
__magic_name__ : str = audioldm_pipe(audio_length_in_s=0.0_3_2 , **__lowercase )
__magic_name__ : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) / vocoder_sampling_rate == 0.0_3_2
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : Any = self.get_dummy_components()
__magic_name__ : str = AudioLDMPipeline(**__lowercase )
__magic_name__ : str = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__magic_name__ : Union[str, Any] = ["""hey"""]
__magic_name__ : int = audioldm_pipe(__lowercase , num_inference_steps=1 )
__magic_name__ : List[str] = output.audios.shape
assert audio_shape == (1, 2_56)
__magic_name__ : Dict = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__magic_name__ : Tuple = SpeechTaHifiGan(__lowercase ).to(__lowercase )
__magic_name__ : str = audioldm_pipe(__lowercase , num_inference_steps=1 )
__magic_name__ : str = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def __magic_name__ ( self ) -> Any:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase )
def __magic_name__ ( self ) -> str:
self._test_inference_batch_single_identical(test_mean_pixel_difference=__lowercase )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __magic_name__ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase )
@slow
class AudioLDMPipelineSlowTests( unittest.TestCase ):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__="cpu" , lowerCAmelCase__=torch.floataa , lowerCAmelCase__=0 ) -> List[Any]:
__magic_name__ : Any = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__magic_name__ : Tuple = np.random.RandomState(__lowercase ).standard_normal((1, 8, 1_28, 16) )
__magic_name__ : Optional[int] = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__magic_name__ : int = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
    def test_audioldm( self ):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        inputs["""num_inference_steps"""] = 25
        audio = audioldm_pipe(**inputs ).audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 1e-2
    def test_audioldm_lms( self ):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
        audioldm_pipe = audioldm_pipe.to(torch_device )
        audioldm_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        audio = audioldm_pipe(**inputs ).audios[0]
        assert audio.ndim == 1
        assert len(audio ) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
        max_diff = np.abs(expected_slice - audio_slice ).max()
        assert max_diff < 3e-2
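# Illustrative usage sketch (not part of the test module), mirroring the slow
# tests above ("cvssp/audioldm" is the checkpoint they already load):
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#   audio = pipe("A hammer hitting a wooden surface",
#                num_inference_steps=10, audio_length_in_s=5.0).audios[0]
#   # `audio` is a 1-D numpy waveform at the vocoder's sampling rate.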
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize : bool = True , size : Optional[Dict[str, int]] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , crop_size : Dict[str, int] = None , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 224, """width""": 224}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , default_to_square=True , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["""height"""], size["""width"""])
        else:
            raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
        return resize(image , size=size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )

    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : Optional[bool] = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        if not is_batched(images ):
            images = [images]
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
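# Illustrative usage sketch (not part of the module). SCREAMING_SNAKE_CASE is
# the processor class defined above; the image file is a placeholder:
#
#   from PIL import Image
#   processor = SCREAMING_SNAKE_CASE(do_resize=True, do_center_crop=True)
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   pixel_values = batch["pixel_values"]  # shape (1, 3, 224, 224)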
from manim import *


class CheckpointToDiskScene(Scene):
    def construct(self):
        # Base building blocks: a memory cell, a smaller "meta" cell for disk,
        # and a fill rectangle used to mark occupied cells.
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU: two columns of six cells with a label underneath.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: a single column of four cells.
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # Model: a row of six cells.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        # Small yellow markers showing where the (empty) model lives on the CPU.
        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []

        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = Rectangle(height=0.46 / 4, width=0.46 / 3).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)

            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        # The checkpoint: six cells above the model.
        ckpt_base = [mem.copy() for i in range(6)]
        ckpt_rect = VGroup(*ckpt_base).arrange(RIGHT, buff=0)
        ckpt_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(ckpt_rect, ckpt_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        # Blue markers: checkpoint weights, mirrored into the CPU columns.
        ckpt_arr = []
        ckpt_cpu_arr = []

        for i, rect in enumerate(ckpt_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        # Legend.
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            "Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])

        # Disk: two columns of six smaller cells.
        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_1, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        # Move the checkpoint weights from CPU memory onto disk, shrinking them
        # into the smaller disk cells.
        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_1))

        step_2 = MarkupText("Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_2.move_to([2, 2, 0])

        self.play(Write(step_2, run_time=3))

        self.play(
            FadeOut(checkpoint, step_2, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
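
# To render the scene above with the manim CLI (assuming the file is saved as
# stage.py; `-pql` is manim's preview-at-low-quality flag):
#
#     manim -pql stage.py CheckpointToDiskScene
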
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        # Converting should swap in BetterTransformer submodules ...
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        # ... and reversing the conversion should restore the canonical modules.
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            # The reloaded model must generate the same output as before the round-trip.
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        model_id = "hf-internal-testing/tiny-random-t5"

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            # Saving a converted model directly is not allowed; it must be reversed first.
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
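
# The round-trip that the tests above exercise, as a standalone sketch
# (assumes `torch` and `optimum` are installed; the save path is a placeholder):
#
#     from transformers import AutoModelForSeq2SeqLM
#
#     model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
#     model = model.to_bettertransformer()       # swap in BetterTransformer modules
#     model = model.reverse_bettertransformer()  # required before save_pretrained
#     model.save_pretrained("t5-roundtrip")
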
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test


sys.path.append(str(Path(__file__).parent.parent / "utils"))

from test_module.custom_image_processing import CustomImageProcessor  # noqa E402


SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTest(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

            new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

            new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
            for k, v in image_processor.__dict__.items():
                self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
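
# The push/reload cycle the staging tests rely on, as a standalone sketch; the
# repo name, fixtures path, and token below are placeholders:
#
#     from transformers import ViTImageProcessor
#
#     processor = ViTImageProcessor.from_pretrained("path/to/fixtures")
#     processor.push_to_hub("my-test-image-processor", use_auth_token="hf_xxx")
#     reloaded = ViTImageProcessor.from_pretrained("<user>/my-test-image-processor")
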
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig


ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}


class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30000,
        embedding_size=128,
        hidden_size=4096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
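
# A short usage sketch for `AlbertConfig`; the overriding hyperparameters below
# are illustrative, not defaults from this file:
#
#     from transformers import AlbertConfig, AlbertModel
#
#     config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#     model = AlbertModel(config)  # randomly initialized with the chosen sizes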