| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import argparse
import json

from tqdm import tqdm


def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
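# Sketch of one record in the raw DPR file this script expects; the field
# values are illustrative assumptions, and only "question" plus the titles
# under "positive_ctxs" are actually read:
#
#     {"question": "...", "positive_ctxs": [{"title": "...", "text": "..."}]}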
| 269 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
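# Minimal usage sketch (assumes this file is registered as a diffusers
# community pipeline, e.g. under the name "stable_diffusion_comparison",
# and that the four CompVis checkpoints above are downloadable):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#     )
#     output = pipe(prompt="an astronaut riding a horse")
#     # output collects the first result from each checkpoint v1.1-v1.4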
| 269 | 1 |
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance


AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    # Flattening of the WGS84 ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
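# Illustrative call (decimal-degree inputs, result in metres; the coordinates
# are assumptions for demonstration, not a verified reference value):
#
#     lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)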
| 716 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["""MobileViTFeatureExtractor"""]
__a = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
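# Note: with this layout the names declared in `_import_structure` are resolved
# lazily; e.g. `from transformers import MobileViTModel` only pulls in the
# torch-side module on first attribute access via `_LazyModule`.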
| 689 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
A__ : Union[str, Any] = 3
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
print('''Generating primitive root of p''' )
while True:
_lowercase: Union[str, Any] = random.randrange(3 , snake_case__ )
if pow(snake_case__ , 2 , snake_case__ ) == 1:
continue
if pow(snake_case__ , snake_case__ , snake_case__ ) == 1:
continue
return g
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
print('''Generating prime p...''' )
_lowercase: str = rabin_miller.generate_large_prime(snake_case__ ) # select large prime number.
_lowercase: int = primitive_root(snake_case__ ) # one primitive root on modulo p.
_lowercase: Tuple = random.randrange(3 , snake_case__ ) # private_key -> have to be greater than 2 for safety.
_lowercase: Union[str, Any] = cryptomath.find_mod_inverse(pow(snake_case__ , snake_case__ , snake_case__ ) , snake_case__ )
_lowercase: Optional[int] = (key_size, e_a, e_a, p)
_lowercase: Union[str, Any] = (key_size, d)
return public_key, private_key
def _lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ):
print('''\nWARNING:''' )
print(
f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
'''Use a different name or delete these files and re-run this program.''' )
sys.exit()
_lowercase: List[str] = generate_key(snake_case__ )
print(f'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(f'''{name}_pubkey.txt''' , '''w''' ) as fo:
fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' )
print(f'''Writing private key to file {name}_privkey.txt...''' )
with open(f'''{name}_privkey.txt''' , '''w''' ) as fo:
fo.write(f'''{private_key[0]},{private_key[1]}''' )
def _lowerCAmelCase ( ):
"""simple docstring"""
print('''Making key files...''' )
make_key_files('''elgamal''' , 2_048 )
print('''Key files generation successful''' )
if __name__ == "__main__":
main()
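# Reading a generated key back is just splitting on commas; a minimal sketch
# (the file name is assumed to match what make_key_files writes above):
#
#     with open("elgamal_pubkey.txt") as fi:
#         key_size, e_1, e_2, p = (int(x) for x in fi.read().split(","))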
| 353 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, attention_window=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }
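# Shape sketch (illustrative): padding positions receive 0 in the derived
# attention masks, the first decoder position is always attended to, and the
# head masks default to all-ones tensors of shape (num_layers, num_heads).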
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        inputs_dict["global_attention_mask"] = tf.zeros_like(inputs_dict["attention_mask"])
        num_global_attn_indices = 2
        inputs_dict["global_attention_mask"] = tf.where(
            tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"],
        )

        config.return_dict = True
        seq_length = self.model_tester.seq_length
        encoder_seq_length = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(outputs):
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [t.numpy() for t in outputs.encoder_attentions]
            global_attentions = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertEqual(len(global_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length],
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["use_cache"] = False
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
    def test_saved_model_creation(self):
        pass

    def test_generate_with_headmasking(self):
        # TODO: head masking is not yet implemented for LED
        pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
| 261 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target) -> bool:
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2_351_563,
                        "num_examples": 10_000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238_418,
                        "num_examples": 1_000,
                    },
                ],
                download_size=3_940_680,
                dataset_size=2_589_981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
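# Quick arithmetic check of the 1% tolerance helper (values illustrative):
# is_1percent_close(2_358_035, 2_351_563) is True; the relative error is
# 6_472 / 2_351_563, roughly 0.28%, which is below the 0.01 threshold.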
| 375 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    model_type = "mctct"

    def __init__(self, vocab_size=8065, hidden_size=1536, num_hidden_layers=36, intermediate_size=6144, num_attention_heads=4, attention_head_dim=384, max_position_embeddings=920, layer_norm_eps=1e-5, layerdrop=0.3, hidden_act="relu", initializer_range=0.02, hidden_dropout_prob=0.3, attention_probs_dropout_prob=0.3, pad_token_id=1, bos_token_id=0, eos_token_id=2, conv_glu_dim=1, conv_dropout=0.3, num_conv_layers=1, conv_kernel=(7,), conv_stride=(3,), input_feat_per_channel=80, input_channels=1, conv_channels=None, ctc_loss_reduction="sum", ctc_zero_infinity=False, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 375 | 1 |
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
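# Note: DDPM and DDIM share the same forward (noising) process, so the final
# noisy batches and noise predictions from the two training loops above should
# coincide; the closing asserts check exactly that.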
| 416 |
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1: tuple[float, float], vertex2: tuple[float, float], vertex3: tuple[float, float], depth: int) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"Correct format for using this script: "
"python fractals.py <int:depth_for_fractal>"
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("red")
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
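# Example invocation (depth is the only CLI argument):
#     python fractals.py 4
# draws a Sierpinski triangle whose finest level contains 3**4 = 81 triangles.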
| 416 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Post-rename basic JAX keys to their PyTorch equivalents."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
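# Illustrative renames (the key tuples are assumptions, not real checkpoint keys):
#   ("mlp", "wi", "kernel") with a 2-D tensor      -> ("mlp", "wi", "weight"), tensor transposed
#   ("mlp", "experts", "kernel") with a 3-D tensor -> ("mlp", "experts", "weight"), dims permuted to (0, 2, 1)
#   ("token_embedder", "embedding")                -> ("token_embedder", "weight"), tensor unchanged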
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        flax_key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(flax_key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 457 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__lowerCamelCase : Optional[Any] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        code = black.format_str(code, mode=black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119))
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
        )
        localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        )
        converted_localized_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_localized_md_list)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_localized_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
        )
        link_unchanged_md_list = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        )
        converted_md_list_sample = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 457 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __snake_case , __snake_case=1_3 , __snake_case=3_0 , __snake_case=2 , __snake_case=3 , __snake_case=True , __snake_case=True , __snake_case=3_2 , __snake_case=5 , __snake_case=4 , __snake_case=3_7 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=1_0 , __snake_case=0.02 , ):
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = is_training
snake_case = use_labels
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = type_sequence_label_size
snake_case = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case = (image_size // patch_size) ** 2
snake_case = num_patches + 1
def a_ ( self ):
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , )
return config, pixel_values
def a_ ( self , __snake_case , __snake_case ):
snake_case = FlaxViTModel(config=_snake_case )
snake_case = model(_snake_case )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
snake_case = (self.image_size, self.image_size)
snake_case = (self.patch_size, self.patch_size)
snake_case = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def a_ ( self , __snake_case , __snake_case ):
snake_case = self.type_sequence_label_size
snake_case = FlaxViTForImageClassification(config=_snake_case )
snake_case = model(_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case = 1
snake_case = FlaxViTForImageClassification(_snake_case )
snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case = model(_snake_case )
def a_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class A__ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def a_ ( self ):
snake_case = FlaxViTModelTester(self )
snake_case = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=3_7 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(_snake_case )
snake_case = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _snake_case )
def a_ ( self ):
snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case = self._prepare_for_class(_snake_case , _snake_case )
snake_case = model_class(_snake_case )
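                # Compare jitted and eager outputs below; tracing must not change the results.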
@jax.jit
def model_jitted(__snake_case , **__snake_case ):
return model(pixel_values=_snake_case , **_snake_case )
with self.subTest('''JIT Enabled''' ):
snake_case = model_jitted(**_snake_case ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
snake_case = model_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def a_ ( self ):
for model_class_name in self.all_model_classes:
snake_case = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
snake_case = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
self.assertIsNotNone(_snake_case )
| 550 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger()
@dataclass
class snake_case_ :
A_ = 42
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
def UpperCAmelCase__ ( self : Union[str, Any] , _snake_case : Dict , _snake_case : Tensor , _snake_case : Tensor )->List[str]:
'''simple docstring'''
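        # Record only leaf modules (or Conv2d / BatchNorm2d layers) as the traced operations.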
__lowerCAmelCase : Union[str, Any] = len(list(m.modules() ) ) == 1 or isinstance(_snake_case , nn.Convad ) or isinstance(_snake_case , nn.BatchNormad )
if has_not_submodules:
self.traced.append(_snake_case )
def __call__( self : Optional[Any] , _snake_case : Tensor )->List[Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(_snake_case )
[x.remove() for x in self.handles]
return self
@property
def UpperCAmelCase__ ( self : int )->List[str]:
'''simple docstring'''
return list(filter(lambda _snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class snake_case_ :
A_ = 42
A_ = 42
A_ = 1
A_ = field(default_factory=__lowercase )
A_ = field(default_factory=__lowercase )
A_ = True
def __call__( self : Tuple , _snake_case : Tensor )->List[str]:
'''simple docstring'''
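        # Trace both networks on the same input, then copy weights operation by operation.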
__lowerCAmelCase : int = Tracker(self.dest )(_snake_case ).parametrized
__lowerCAmelCase : List[str] = Tracker(self.src )(_snake_case ).parametrized
__lowerCAmelCase : int = list(filter(lambda _snake_case : type(_snake_case ) not in self.src_skip , _snake_case ) )
__lowerCAmelCase : List[str] = list(filter(lambda _snake_case : type(_snake_case ) not in self.dest_skip , _snake_case ) )
if len(_snake_case ) != len(_snake_case ) and self.raise_if_mismatch:
raise Exception(
F'''Numbers of operations are different. Source module has {len(_snake_case )} operations while'''
F''' destination module has {len(_snake_case )}.''' )
for dest_m, src_m in zip(_snake_case , _snake_case ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
class snake_case_ ( nn.Module ):
def __init__( self : Any , _snake_case : nn.Module )->str:
'''simple docstring'''
super().__init__()
__lowerCAmelCase : List[Tuple[str, nn.Module]] = []
# - get the stem
feature_blocks.append(("""conv1""", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("""block""" ), F'''Unexpected layer name {k}'''
__lowerCAmelCase : List[str] = len(_snake_case ) + 1
feature_blocks.append((F'''res{block_index}''', v) )
__lowerCAmelCase : List[Any] = nn.ModuleDict(_snake_case )
def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : Tensor )->Optional[int]:
'''simple docstring'''
return get_trunk_forward_outputs(
_snake_case , out_feat_keys=_snake_case , feature_blocks=self._feature_blocks , )
class snake_case_ ( __lowercase ):
def UpperCAmelCase__ ( self : List[str] , _snake_case : str )->str:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = x.split("""-""" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Optional[int] , _snake_case : str )->Callable[[], Tuple[nn.Module, Dict]]:
'''simple docstring'''
if x not in self:
__lowerCAmelCase : int = self.convert_name_to_timm(_snake_case )
__lowerCAmelCase : List[Any] = partial(lambda: (timm.create_model(_snake_case , pretrained=_snake_case ).eval(), None) )
else:
__lowerCAmelCase : Optional[Any] = super().__getitem__(_snake_case )
return val
class snake_case_ ( __lowercase ):
def __getitem__( self : Union[str, Any] , _snake_case : str )->Callable[[], nn.Module]:
'''simple docstring'''
if "seer" in x and "in1k" not in x:
__lowerCAmelCase : Optional[int] = RegNetModel
else:
__lowerCAmelCase : str = RegNetForImageClassification
return val
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[Tuple[str, str]] ) -> Any:
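    # Copy the listed tensors from the source state dict into the destination under their new key names.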
for from_key, to_key in keys:
__lowerCAmelCase : List[Any] = from_state_dict[from_key].clone()
print(F'''Copied key={from_key} to={to_key}''' )
return to_state_dict
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Callable[[], nn.Module] , SCREAMING_SNAKE_CASE :Callable[[], nn.Module] , SCREAMING_SNAKE_CASE :RegNetConfig , SCREAMING_SNAKE_CASE :Path , SCREAMING_SNAKE_CASE :bool = True , ) -> Union[str, Any]:
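    # Build both models, transfer weights via module tracing, verify the outputs match, then optionally push to the Hub.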
print(F'''Converting {name}...''' )
with torch.no_grad():
__lowerCAmelCase , __lowerCAmelCase : List[Any] = from_model_func()
__lowerCAmelCase : int = our_model_func(SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = ModuleTransfer(src=SCREAMING_SNAKE_CASE , dest=SCREAMING_SNAKE_CASE , raise_if_mismatch=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(SCREAMING_SNAKE_CASE )
if from_state_dict is not None:
__lowerCAmelCase : str = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
__lowerCAmelCase : Optional[int] = [("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")]
__lowerCAmelCase : Any = manually_copy_vissl_head(SCREAMING_SNAKE_CASE , our_model.state_dict() , SCREAMING_SNAKE_CASE )
our_model.load_state_dict(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = our_model(SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = (
our_outputs.logits if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else our_outputs.last_hidden_state
)
__lowerCAmelCase : Optional[int] = from_model(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = from_output[-1] if type(SCREAMING_SNAKE_CASE ) is list else from_output
    # now, since we don't load any config files, the VISSL SEER model doesn't actually have a head, so just check the last hidden state
if "seer" in name and "in1k" in name:
__lowerCAmelCase : Union[str, Any] = our_outputs.hidden_states[-1]
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[str] = 224 if """seer""" not in name else 384
# we can use the convnext one
__lowerCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" , size=SCREAMING_SNAKE_CASE )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
print(F'''Pushed {name}''' )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Path , SCREAMING_SNAKE_CASE :str = None , SCREAMING_SNAKE_CASE :bool = True ) -> Union[str, Any]:
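    # Assemble the ImageNet id2label mapping and one config per RegNet variant, then convert the requested model(s).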
__lowerCAmelCase : List[str] = """imagenet-1k-id2label.json"""
__lowerCAmelCase : str = 1_000
__lowerCAmelCase : Dict = (1, num_labels)
__lowerCAmelCase : Dict = """huggingface/label-files"""
__lowerCAmelCase : Tuple = num_labels
__lowerCAmelCase : str = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) ) , """r""" ) )
__lowerCAmelCase : List[Any] = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowerCAmelCase : Optional[int] = idalabel
__lowerCAmelCase : int = {v: k for k, v in idalabel.items()}
__lowerCAmelCase : List[str] = partial(SCREAMING_SNAKE_CASE , num_labels=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = {
"""regnet-x-002""": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ),
"""regnet-x-004""": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ),
"""regnet-x-016""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ),
"""regnet-x-032""": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type="""x""" ),
"""regnet-x-040""": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type="""x""" ),
"""regnet-x-064""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type="""x""" ),
"""regnet-x-080""": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type="""x""" ),
"""regnet-x-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type="""x""" ),
"""regnet-x-160""": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type="""x""" ),
"""regnet-x-320""": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type="""x""" ),
# y variant
"""regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"""regnet-y-004""": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"""regnet-y-006""": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"""regnet-y-008""": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"""regnet-y-016""": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"""regnet-y-032""": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
"""regnet-y-040""": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
"""regnet-y-064""": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
"""regnet-y-080""": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
"""regnet-y-120""": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
"""regnet-y-160""": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
"""regnet-y-320""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"""regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
"""regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
"""regnet-y-1280-seer""": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
"""regnet-y-2560-seer""": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
"""regnet-y-10b-seer""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
"""regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
"""regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
"""regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
"""regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
"""regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
__lowerCAmelCase : Dict = NameToOurModelFuncMap()
__lowerCAmelCase : Any = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Callable[[], nn.Module] ) -> Tuple[nn.Module, Dict]:
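        # Download the VISSL checkpoint, instantiate the model, load the trunk weights, and return the heads separately.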
__lowerCAmelCase : Optional[int] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , model_dir=str(SCREAMING_SNAKE_CASE ) , map_location="""cpu""" )
__lowerCAmelCase : Optional[Any] = model_func()
# check if we have a head, if yes add it
__lowerCAmelCase : List[Any] = files["""classy_state_dict"""]["""base_model"""]["""model"""]
__lowerCAmelCase : List[Any] = model_state_dict["""trunk"""]
model.load_state_dict(SCREAMING_SNAKE_CASE )
return model.eval(), model_state_dict["heads"]
# pretrained
__lowerCAmelCase : Optional[int] = partial(
SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase : Dict = partial(
SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase : List[str] = partial(
SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__lowerCAmelCase : Union[str, Any] = partial(
SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
__lowerCAmelCase : Dict = partial(
SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase : int = partial(
SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
__lowerCAmelCase : Union[str, Any] = partial(
SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
__lowerCAmelCase : int = partial(
SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=6_20.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
return config, expected_shape
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported regnet* architecture,'
' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 504 | 0 |
'''simple docstring'''
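# Apply a 1-indexed permutation table to a bit-string.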
def apply_table(inp, table):
    res = ''
    for i in table:
        res += inp[i - 1]
    return res
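# Circular left shift of a bit-string by one position.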
def left_shift(data):
    return data[1:] + data[0]
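# Bitwise XOR of two equal-length bit-strings.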
def xor(a, b):
    res = ''
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
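# S-box lookup: the outer bits of `data` select the row, the inner bits select the column.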
def apply_sbox(s, data):
    row = int('0b' + data[0] + data[-1], 2)
    col = int('0b' + data[1:3], 2)
    return bin(s[row][col])[2:]
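# One Feistel round: expand and key-mix the right half, substitute, permute, then mix into the left half.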
def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = '0' * (2 - len(l)) + l  # noqa: E741
    r = '0' * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = input("Enter 10 bit key: ")
_SCREAMING_SNAKE_CASE = input("Enter 8 bit message: ")
_SCREAMING_SNAKE_CASE = [6, 3, 7, 4, 8, 5, 10, 9]
_SCREAMING_SNAKE_CASE = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_SCREAMING_SNAKE_CASE = [2, 4, 3, 1]
_SCREAMING_SNAKE_CASE = [2, 6, 3, 1, 4, 8, 5, 7]
_SCREAMING_SNAKE_CASE = [4, 1, 3, 5, 7, 2, 8, 6]
_SCREAMING_SNAKE_CASE = [4, 1, 2, 3, 2, 3, 4, 1]
_SCREAMING_SNAKE_CASE = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_SCREAMING_SNAKE_CASE = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_SCREAMING_SNAKE_CASE = apply_table(key, paa_table)
_SCREAMING_SNAKE_CASE = temp[:5]
_SCREAMING_SNAKE_CASE = temp[5:]
_SCREAMING_SNAKE_CASE = left_shift(left)
_SCREAMING_SNAKE_CASE = left_shift(right)
_SCREAMING_SNAKE_CASE = apply_table(left + right, pa_table)
_SCREAMING_SNAKE_CASE = left_shift(left)
_SCREAMING_SNAKE_CASE = left_shift(right)
_SCREAMING_SNAKE_CASE = left_shift(left)
_SCREAMING_SNAKE_CASE = left_shift(right)
_SCREAMING_SNAKE_CASE = apply_table(left + right, pa_table)
# encryption
_SCREAMING_SNAKE_CASE = apply_table(message, IP)
_SCREAMING_SNAKE_CASE = function(expansion, sa, sa, keya, temp)
_SCREAMING_SNAKE_CASE = temp[4:] + temp[:4]
_SCREAMING_SNAKE_CASE = function(expansion, sa, sa, keya, temp)
_SCREAMING_SNAKE_CASE = apply_table(temp, IP_inv)
print("Cipher text is:", CT)
# decryption
_SCREAMING_SNAKE_CASE = apply_table(CT, IP)
_SCREAMING_SNAKE_CASE = function(expansion, sa, sa, keya, temp)
_SCREAMING_SNAKE_CASE = temp[4:] + temp[:4]
_SCREAMING_SNAKE_CASE = function(expansion, sa, sa, keya, temp)
_SCREAMING_SNAKE_CASE = apply_table(temp, IP_inv)
print("Plain text after decypting is:", PT)
| 701 |
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
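# Helper that builds a nested list of random floats with the requested 2-D shape.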
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self :int, snake_case :List[Any], snake_case :List[str]=7, snake_case :Union[str, Any]=400, snake_case :List[Any]=2000, snake_case :Union[str, Any]=1, snake_case :Tuple=0.0, snake_case :Tuple=1_6000, snake_case :Optional[Any]=True, snake_case :List[Any]=True, ):
"""simple docstring"""
_lowercase =parent
_lowercase =batch_size
_lowercase =min_seq_length
_lowercase =max_seq_length
_lowercase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_lowercase =feature_size
_lowercase =padding_value
_lowercase =sampling_rate
_lowercase =return_attention_mask
_lowercase =do_normalize
def UpperCamelCase__ ( self :int):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase__ ( self :Tuple, snake_case :Optional[Any]=False, snake_case :int=False):
"""simple docstring"""
def _flatten(snake_case :Optional[int]):
return list(itertools.chain(*snake_case))
if equal_length:
_lowercase =floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
_lowercase =[
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
_lowercase =[np.asarray(snake_case) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE_ ( _a , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : Any =ASTFeatureExtractor
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =ASTFeatureExtractionTester(self)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
_lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
_lowercase =[floats_list((1, x))[0] for x in range(800, 1400, 200)]
_lowercase =[np.asarray(snake_case) for speech_input in speech_inputs]
# Test not batched input
_lowercase =feat_extract(speech_inputs[0], return_tensors='np').input_values
_lowercase =feat_extract(np_speech_inputs[0], return_tensors='np').input_values
self.assertTrue(np.allclose(snake_case, snake_case, atol=1e-3))
# Test batched
_lowercase =feat_extract(snake_case, padding=snake_case, return_tensors='np').input_values
_lowercase =feat_extract(snake_case, padding=snake_case, return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case, snake_case):
self.assertTrue(np.allclose(snake_case, snake_case, atol=1e-3))
# Test 2-D numpy arrays are batched.
_lowercase =[floats_list((1, x))[0] for x in (800, 800, 800)]
_lowercase =np.asarray(snake_case)
_lowercase =feat_extract(snake_case, return_tensors='np').input_values
_lowercase =feat_extract(snake_case, return_tensors='np').input_values
for enc_seq_a, enc_seq_a in zip(snake_case, snake_case):
self.assertTrue(np.allclose(snake_case, snake_case, atol=1e-3))
@require_torch
def UpperCamelCase__ ( self :Tuple):
"""simple docstring"""
import torch
_lowercase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
_lowercase =np.random.rand(100).astype(np.floataa)
_lowercase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowercase =feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
self.assertTrue(np_processed.input_values.dtype == np.floataa)
_lowercase =feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
self.assertTrue(pt_processed.input_values.dtype == torch.floataa)
def UpperCamelCase__ ( self :Tuple, snake_case :Any):
"""simple docstring"""
from datasets import load_dataset
_lowercase =load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
# automatic decoding with librispeech
_lowercase =ds.sort('id').select(range(snake_case))[:num_samples]['audio']
return [x["array"] for x in speech_samples]
@require_torch
def UpperCamelCase__ ( self :str):
"""simple docstring"""
        # fmt: off
        _lowercase =torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9])
# fmt: on
_lowercase =self._load_datasamples(1)
_lowercase =ASTFeatureExtractor()
_lowercase =feature_extractor(snake_case, return_tensors='pt').input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
self.assertTrue(torch.allclose(input_values[0, 0, :30], snake_case, atol=1e-4))
| 557 | 0 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
snake_case = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] = '''summarization'''
UpperCamelCase_ : Any = ['''loss''']
UpperCamelCase_ : int = ROUGE_KEYS
UpperCamelCase_ : Tuple = '''rouge2'''
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Optional[int] ):
if hparams.sortish_sampler and hparams.gpus > 1:
SCREAMING_SNAKE_CASE : Tuple = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(UpperCAmelCase_ , num_labels=UpperCAmelCase_ , mode=self.mode , **UpperCAmelCase_ )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
SCREAMING_SNAKE_CASE : int = Path(self.output_dir ) / "metrics.json"
SCREAMING_SNAKE_CASE : Union[str, Any] = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
SCREAMING_SNAKE_CASE : List[Any] = 0
SCREAMING_SNAKE_CASE : Tuple = defaultdict(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = self.config.model_type
SCREAMING_SNAKE_CASE : Any = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
SCREAMING_SNAKE_CASE : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
SCREAMING_SNAKE_CASE : Tuple = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
SCREAMING_SNAKE_CASE : Union[str, Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
SCREAMING_SNAKE_CASE : List[Any] = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
SCREAMING_SNAKE_CASE : int = get_git_info()["repo_sha"]
SCREAMING_SNAKE_CASE : Any = hparams.num_workers
SCREAMING_SNAKE_CASE : Dict = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
SCREAMING_SNAKE_CASE : str = self.decoder_start_token_id
SCREAMING_SNAKE_CASE : Optional[int] = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
SCREAMING_SNAKE_CASE : Dict = self.hparams.eval_max_gen_length
else:
SCREAMING_SNAKE_CASE : List[str] = self.model.config.max_length
SCREAMING_SNAKE_CASE : str = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def _A ( self : Any , UpperCAmelCase_ : Dict[str, torch.Tensor] ):
SCREAMING_SNAKE_CASE : List[str] = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(UpperCAmelCase_ , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
SCREAMING_SNAKE_CASE : int = True
return readable_batch
def _A ( self : List[str] , UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Any ):
return self.model(UpperCAmelCase_ , **UpperCAmelCase_ )
def _A ( self : Union[str, Any] , UpperCAmelCase_ : List[int] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.batch_decode(
UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return lmap(str.strip , UpperCAmelCase_ )
def _A ( self : str , UpperCAmelCase_ : dict ):
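        # Teacher-forced step: shift the labels right to build decoder inputs, then compute the (optionally label-smoothed) LM loss.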
SCREAMING_SNAKE_CASE : str = self.tokenizer.pad_token_id
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = batch["input_ids"], batch["attention_mask"]
SCREAMING_SNAKE_CASE : List[str] = batch["labels"]
if isinstance(self.model , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : int = self.model._shift_right(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = shift_tokens_right(UpperCAmelCase_ , UpperCAmelCase_ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
SCREAMING_SNAKE_CASE : Optional[int] = decoder_input_ids
self.save_readable_batch(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = self(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
SCREAMING_SNAKE_CASE : List[str] = nn.CrossEntropyLoss(ignore_index=UpperCAmelCase_ )
assert lm_logits.shape[-1] == self.vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
SCREAMING_SNAKE_CASE : List[Any] = nn.functional.log_softmax(UpperCAmelCase_ , dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = label_smoothed_nll_loss(
UpperCAmelCase_ , UpperCAmelCase_ , self.hparams.label_smoothing , ignore_index=UpperCAmelCase_ )
return (loss,)
@property
def _A ( self : int ):
return self.tokenizer.pad_token_id
def _A ( self : Optional[int] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Dict = self._step(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = dict(zip(self.loss_names , UpperCAmelCase_ ) )
# tokens per batch
SCREAMING_SNAKE_CASE : Dict = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
SCREAMING_SNAKE_CASE : Tuple = batch["input_ids"].shape[0]
SCREAMING_SNAKE_CASE : Any = batch["input_ids"].eq(self.pad ).sum()
SCREAMING_SNAKE_CASE : Optional[Any] = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def _A ( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ):
return self._generative_step(UpperCAmelCase_ )
def _A ( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : str="val" ):
self.step_count += 1
SCREAMING_SNAKE_CASE : int = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
SCREAMING_SNAKE_CASE : Union[str, Any] = losses["loss"]
SCREAMING_SNAKE_CASE : Tuple = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
SCREAMING_SNAKE_CASE : str = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
SCREAMING_SNAKE_CASE : torch.FloatTensor = torch.tensor(UpperCAmelCase_ ).type_as(UpperCAmelCase_ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
SCREAMING_SNAKE_CASE : Tuple = self.step_count
self.metrics[prefix].append(UpperCAmelCase_ ) # callback writes this to self.metrics_save_path
SCREAMING_SNAKE_CASE : Any = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def _A ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] ):
return calculate_rouge(UpperCAmelCase_ , UpperCAmelCase_ )
def _A ( self : List[str] , UpperCAmelCase_ : dict ):
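        # Generate, decode predictions and references to text, and compute ROUGE/BLEU plus generation time and length statistics.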
SCREAMING_SNAKE_CASE : Union[str, Any] = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=UpperCAmelCase_ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
SCREAMING_SNAKE_CASE : Any = (time.time() - ta) / batch["input_ids"].shape[0]
SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : List[str] = self.ids_to_clean_text(batch["labels"] )
SCREAMING_SNAKE_CASE : Dict = self._step(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = dict(zip(self.loss_names , UpperCAmelCase_ ) )
SCREAMING_SNAKE_CASE : Dict = self.calc_generative_metrics(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = np.mean(lmap(UpperCAmelCase_ , UpperCAmelCase_ ) )
base_metrics.update(gen_time=UpperCAmelCase_ , gen_len=UpperCAmelCase_ , preds=UpperCAmelCase_ , target=UpperCAmelCase_ , **UpperCAmelCase_ )
return base_metrics
def _A ( self : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any ):
return self._generative_step(UpperCAmelCase_ )
def _A ( self : str , UpperCAmelCase_ : int ):
return self.validation_epoch_end(UpperCAmelCase_ , prefix="test" )
def _A ( self : Optional[int] , UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE : Optional[Any] = self.n_obs[type_path]
SCREAMING_SNAKE_CASE : int = self.target_lens[type_path]
SCREAMING_SNAKE_CASE : Any = self.dataset_class(
self.tokenizer , type_path=UpperCAmelCase_ , n_obs=UpperCAmelCase_ , max_target_length=UpperCAmelCase_ , **self.dataset_kwargs , )
return dataset
def _A ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : bool = False ):
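        # Pick a sortish sampler, a dynamic token-budget batch sampler, or a plain shuffled loader.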
SCREAMING_SNAKE_CASE : int = self.get_dataset(UpperCAmelCase_ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE : int = dataset.make_sortish_sampler(UpperCAmelCase_ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
SCREAMING_SNAKE_CASE : Union[str, Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCAmelCase_ , batch_sampler=UpperCAmelCase_ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase_ , num_workers=self.num_workers , sampler=UpperCAmelCase_ , )
def _A ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=UpperCAmelCase_ )
return dataloader
def _A ( self : str ):
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def _A ( self : int ):
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def _A ( UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ):
BaseTransformer.add_model_specific_args(UpperCAmelCase_ , UpperCAmelCase_ )
add_generic_args(UpperCAmelCase_ , UpperCAmelCase_ )
parser.add_argument(
"--max_source_length" , default=1024 , type=UpperCAmelCase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--max_target_length" , default=56 , type=UpperCAmelCase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--val_max_target_length" , default=142 , type=UpperCAmelCase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--test_max_target_length" , default=142 , type=UpperCAmelCase_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=UpperCAmelCase_ )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=UpperCAmelCase_ )
parser.add_argument("--max_tokens_per_batch" , type=UpperCAmelCase_ , default=UpperCAmelCase_ )
parser.add_argument("--logger_name" , type=UpperCAmelCase_ , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=UpperCAmelCase_ , default=500 , required=UpperCAmelCase_ , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help="# examples. -1 means use all." )
parser.add_argument(
"--task" , type=UpperCAmelCase_ , default="summarization" , required=UpperCAmelCase_ , help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" , type=UpperCAmelCase_ , default=0.0 , required=UpperCAmelCase_ )
parser.add_argument("--src_lang" , type=UpperCAmelCase_ , default="" , required=UpperCAmelCase_ )
parser.add_argument("--tgt_lang" , type=UpperCAmelCase_ , default="" , required=UpperCAmelCase_ )
parser.add_argument("--eval_beams" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ )
parser.add_argument(
"--val_metric" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , required=UpperCAmelCase_ , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=UpperCAmelCase_ , default=1 , required=UpperCAmelCase_ , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=UpperCAmelCase_ , default=-1 , required=UpperCAmelCase_ , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Union[str, Any] = '''translation'''
UpperCamelCase_ : Union[str, Any] = ['''loss''']
UpperCamelCase_ : List[Any] = ['''bleu''']
UpperCamelCase_ : Dict = '''bleu'''
def __init__( self : int , UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int] ):
super().__init__(UpperCAmelCase_ , **UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = hparams.src_lang
SCREAMING_SNAKE_CASE : Tuple = hparams.tgt_lang
def _A ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] ):
return calculate_bleu(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase__ ( lowercase , lowercase=None ):
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=lowercase )
check_output_dir(lowercase , expected_items=3 )
if model is None:
if "summarization" in args.task:
SCREAMING_SNAKE_CASE : SummarizationModule = SummarizationModule(lowercase )
else:
SCREAMING_SNAKE_CASE : SummarizationModule = TranslationModule(lowercase )
SCREAMING_SNAKE_CASE : Dict = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
SCREAMING_SNAKE_CASE : Dict = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE : Union[str, Any] = os.environ.get("WANDB_PROJECT" , lowercase )
SCREAMING_SNAKE_CASE : Any = WandbLogger(name=model.output_dir.name , project=lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
SCREAMING_SNAKE_CASE : Any = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
SCREAMING_SNAKE_CASE : str = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Tuple = args.val_metric == "loss"
SCREAMING_SNAKE_CASE : pl.Trainer = generic_train(
lowercase , lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , lowercase ) , early_stopping_callback=lowercase , logger=lowercase , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
SCREAMING_SNAKE_CASE : str = ""
SCREAMING_SNAKE_CASE : List[str] = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=lowercase ) )
if checkpoints:
SCREAMING_SNAKE_CASE : Tuple = checkpoints[-1]
SCREAMING_SNAKE_CASE : int = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
snake_case = pl.Trainer.add_argparse_args(parser)
snake_case = SummarizationModule.add_model_specific_args(parser, os.getcwd())
snake_case = parser.parse_args()
main(args)
| 62 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = os.path.join(args.tf_model_dir , "parameters.json" )
SCREAMING_SNAKE_CASE : Union[str, Any] = json.loads(open(lowercase ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(".pt" ):
SCREAMING_SNAKE_CASE : Optional[int] = args.output + ".pt"
SCREAMING_SNAKE_CASE : Any = OrderedDict()
with tf.device("/CPU:0" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.train.load_checkpoint(args.tf_model_dir )
SCREAMING_SNAKE_CASE : Union[str, Any] = reader.get_variable_to_shape_map()
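    # Translate each TF variable name to the PyTorch state-dict layout, transposing kernels where the layouts differ.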
for key_name in shapes.keys():
SCREAMING_SNAKE_CASE : Any = reader.get_tensor(lowercase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
SCREAMING_SNAKE_CASE : Optional[int] = 8
SCREAMING_SNAKE_CASE : List[Any] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
SCREAMING_SNAKE_CASE : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(lowercase )
elif key_name.startswith("model/moe" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
SCREAMING_SNAKE_CASE : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Any = torch.tensor(lowercase )
elif key_name.endswith("/softmlp/kernel" ):
SCREAMING_SNAKE_CASE : Dict = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
SCREAMING_SNAKE_CASE : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(lowercase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
SCREAMING_SNAKE_CASE : Optional[int] = key_name[-9:-7]
for i in range(16 ):
SCREAMING_SNAKE_CASE : List[Any] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
SCREAMING_SNAKE_CASE : List[str] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(lowercase )
elif key_name.startswith("model/mlp" ):
SCREAMING_SNAKE_CASE : str = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
SCREAMING_SNAKE_CASE : Dict = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Any = torch.tensor(lowercase )
elif key_name.endswith("/p1/bias" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowercase )
elif key_name.endswith("/p2/kernel" ):
SCREAMING_SNAKE_CASE : str = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
SCREAMING_SNAKE_CASE : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowercase )
elif key_name.endswith("/p2/bias" ):
SCREAMING_SNAKE_CASE : Tuple = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
SCREAMING_SNAKE_CASE : str = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowercase )
elif key_name.startswith("model/ln" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "model.blocks.%d.feed_forward.norm.bias" % player
SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(lowercase )
elif key_name.endswith("/g" ):
SCREAMING_SNAKE_CASE : List[str] = "model.blocks.%d.feed_forward.norm.weight" % player
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : Dict = torch.tensor(lowercase )
elif key_name.startswith("model/att" ):
SCREAMING_SNAKE_CASE : Optional[int] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
SCREAMING_SNAKE_CASE : List[str] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
SCREAMING_SNAKE_CASE : List[str] = state[:, 0, :, :]
SCREAMING_SNAKE_CASE : Tuple = state[:, 1, :, :]
SCREAMING_SNAKE_CASE : List[Any] = state[:, 2, :, :]
SCREAMING_SNAKE_CASE : Tuple = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : List[Any] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Union[str, Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : int = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(lowercase )
elif key_name.endswith("/o/kernel" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
SCREAMING_SNAKE_CASE : Optional[int] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : str = torch.tensor(lowercase )
elif key_name.startswith("model/an" ):
SCREAMING_SNAKE_CASE : int = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
SCREAMING_SNAKE_CASE : List[Any] = "model.blocks.%d.self_attn.norm.bias" % player
SCREAMING_SNAKE_CASE : int = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowercase )
elif key_name.endswith("/g" ):
SCREAMING_SNAKE_CASE : Tuple = "model.blocks.%d.self_attn.norm.weight" % player
SCREAMING_SNAKE_CASE : List[str] = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : str = torch.tensor(lowercase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
SCREAMING_SNAKE_CASE : str = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
SCREAMING_SNAKE_CASE : List[str] = "model.%s.weight" % nlayer
SCREAMING_SNAKE_CASE : Union[str, Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : str = torch.tensor(lowercase )
if key_name.startswith("model/wte" ):
SCREAMING_SNAKE_CASE : Union[str, Any] = "lm_head.weight"
SCREAMING_SNAKE_CASE : List[Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : Dict = torch.tensor(lowercase )
elif key_name.startswith("model/wob" ):
SCREAMING_SNAKE_CASE : List[Any] = "final_logits_bias"
SCREAMING_SNAKE_CASE : Optional[Any] = vnp.copy() # same in embedded
SCREAMING_SNAKE_CASE : List[str] = state.reshape((1, -1) )
SCREAMING_SNAKE_CASE : int = torch.tensor(lowercase )
elif key_name == "model/dense/kernel":
SCREAMING_SNAKE_CASE : Optional[int] = "model.last_project.weight"
SCREAMING_SNAKE_CASE : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
SCREAMING_SNAKE_CASE : Tuple = torch.tensor(lowercase )
elif key_name == "model/dense_1/bias":
SCREAMING_SNAKE_CASE : str = "model.last_project.bias"
SCREAMING_SNAKE_CASE : int = vnp.copy() # same because it is one dimensional
SCREAMING_SNAKE_CASE : str = torch.tensor(lowercase )
torch.save(lowercase , args.output )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
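# Illustrative invocation of the converter above (the paths are hypothetical;
# --tf_model_dir must point at a TensorFlow GPTSAN checkpoint directory):
#
#   python convert_tf_gptsan_to_pt.py --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_pytorch.bin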
| 62 | 1 |
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
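# The eight lists above appear to be the precomputed DeepFloyd IF denoising
# schedules from diffusers (fast27 / smart27 / smart50 / smart100 / smart185 /
# super27 / super40 / super100): descending DDPM timestep indices in [0, 999].
# A hedged sketch of how such a schedule is consumed (the pipeline call below
# is illustrative, not from this file):
#
#   images = pipe(prompt_embeds=embeds, timesteps=fast27_timesteps).images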
| 718 |
def binary_multiply(a: int, b: int) -> int:
    """Multiply a and b with the Russian-peasant (shift-and-add) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a and b modulo `modulus`, keeping intermediate values small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
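# Quick sanity checks (added for illustration; shift-and-add multiplication
# must agree with the built-in * operator for non-negative b):
if __name__ == "__main__":
    assert binary_multiply(3, 9) == 27
    assert binary_mod_multiply(3, 4, 5) == (3 * 4) % 5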
| 23 | 0 |
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/google/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
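# A hedged usage sketch (assumes the standard `transformers` package layout,
# where this class is exposed as `transformers.ViTConfig`):
#
#   from transformers import ViTConfig, ViTModel
#   config = ViTConfig(image_size=384, patch_size=32)   # override two defaults
#   model = ViTModel(config)                            # randomly initialised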
| 689 |
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False  # flag name reconstructed; the original identifier was obfuscated

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 124 | 0 |
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the "previous" timestep of the inverted process (= t + 1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
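# A hedged usage sketch for DDIMInverseScheduler (illustrative only; `unet`
# and `latents` would come from a surrounding DDIM-inversion loop):
#
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample
| 706 |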
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__lowercase : List[str] = ["""bert-base-uncased""", """bert-base-cased"""]
__lowercase : Tuple = """hf-internal-testing/tiny-bert-tf-only"""
if is_tf_available():
class lowerCAmelCase ( tf.keras.Model ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowerCamelCase_ = tokenizer
lowerCamelCase_ = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase_ = TFAutoModel.from_config(UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = self.tokenizer(UpperCamelCase__ )
lowerCamelCase_ = self.bert(**UpperCamelCase__ )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
super().setUp()
lowerCamelCase_ = [
BertTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
lowerCamelCase_ = [TFBertTokenizer.from_pretrained(UpperCamelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(UpperCamelCase__ , use_fast_bert_tokenizer=UpperCamelCase__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCamelCase_ = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowerCamelCase_ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCamelCase_ = tokenizer(UpperCamelCase__ , return_tensors='''tf''' , padding='''longest''' )
lowerCamelCase_ = tf_tokenizer(UpperCamelCase__ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = tf_tokenizer(self.paired_sentences )
lowerCamelCase_ = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = tf.function(UpperCamelCase__ )
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCamelCase_ = tf.constant(UpperCamelCase__ )
lowerCamelCase_ = compiled_tokenizer(UpperCamelCase__ )
lowerCamelCase_ = tf_tokenizer(UpperCamelCase__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ = ModelToSave(tokenizer=UpperCamelCase__ )
lowerCamelCase_ = tf.convert_to_tensor(self.test_sentences )
lowerCamelCase_ = model(UpperCamelCase__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCamelCase_ = Path(UpperCamelCase__ ) / '''saved.model'''
model.save(UpperCamelCase__ )
lowerCamelCase_ = tf.keras.models.load_model(UpperCamelCase__ )
lowerCamelCase_ = loaded_model(UpperCamelCase__ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 ) | 66 | 0 |
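# The export test above is the key pattern: TFBertTokenizer is a Keras layer,
# so tokenizer and model serialize into a single SavedModel. A hedged
# serving-side sketch (path and input are illustrative):
#
#   serving_model = tf.keras.models.load_model("saved.model")
#   pooled = serving_model(tf.constant(["raw text goes straight into the graph"]))
| 66 | 0 |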
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : str = logging.get_logger(__name__)
__lowerCAmelCase : Optional[int] = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCAmelCase ( A_ ):
"""simple docstring"""
_lowerCamelCase = '''wavlm'''
def __init__( self , _lowercase=3_2 , _lowercase=7_6_8 , _lowercase=1_2 , _lowercase=1_2 , _lowercase=3_0_7_2 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=1E-5 , _lowercase="group" , _lowercase="gelu" , _lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _lowercase=(5, 2, 2, 2, 2, 2, 2) , _lowercase=(1_0, 3, 3, 3, 3, 2, 2) , _lowercase=False , _lowercase=1_2_8 , _lowercase=1_6 , _lowercase=3_2_0 , _lowercase=8_0_0 , _lowercase=False , _lowercase=True , _lowercase=0.05 , _lowercase=1_0 , _lowercase=2 , _lowercase=0.0 , _lowercase=1_0 , _lowercase=3_2_0 , _lowercase=2 , _lowercase=0.1 , _lowercase=1_0_0 , _lowercase=2_5_6 , _lowercase=2_5_6 , _lowercase=0.1 , _lowercase="mean" , _lowercase=False , _lowercase=False , _lowercase=2_5_6 , _lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , _lowercase=(5, 3, 3, 1, 1) , _lowercase=(1, 2, 3, 1, 1) , _lowercase=5_1_2 , _lowercase=8_0 , _lowercase=0 , _lowercase=1 , _lowercase=2 , _lowercase=False , _lowercase=3 , _lowercase=2 , _lowercase=3 , _lowercase=None , **_lowercase , ) -> List[str]:
'''simple docstring'''
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase )
snake_case_ : Tuple = hidden_size
snake_case_ : List[Any] = feat_extract_norm
snake_case_ : int = feat_extract_activation
snake_case_ : List[str] = list(_lowercase )
snake_case_ : Union[str, Any] = list(_lowercase )
snake_case_ : List[str] = list(_lowercase )
snake_case_ : List[str] = conv_bias
snake_case_ : Optional[Any] = num_buckets
snake_case_ : List[str] = max_bucket_distance
snake_case_ : Dict = num_conv_pos_embeddings
snake_case_ : int = num_conv_pos_embedding_groups
snake_case_ : Optional[Any] = len(self.conv_dim )
snake_case_ : List[str] = num_hidden_layers
snake_case_ : Optional[Any] = intermediate_size
snake_case_ : List[Any] = hidden_act
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : Optional[Any] = hidden_dropout
snake_case_ : Optional[Any] = attention_dropout
snake_case_ : Any = activation_dropout
snake_case_ : Union[str, Any] = feat_proj_dropout
snake_case_ : Optional[Any] = final_dropout
snake_case_ : Tuple = layerdrop
snake_case_ : str = layer_norm_eps
snake_case_ : str = initializer_range
snake_case_ : int = num_ctc_classes
snake_case_ : Optional[Any] = vocab_size
snake_case_ : Union[str, Any] = do_stable_layer_norm
snake_case_ : Tuple = use_weighted_layer_sum
snake_case_ : List[str] = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case_ : Optional[Any] = apply_spec_augment
snake_case_ : Dict = mask_time_prob
snake_case_ : str = mask_time_length
snake_case_ : Optional[int] = mask_time_min_masks
snake_case_ : int = mask_feature_prob
snake_case_ : Optional[int] = mask_feature_length
# parameters for pretraining with codevector quantized representations
snake_case_ : Optional[Any] = num_codevectors_per_group
snake_case_ : Dict = num_codevector_groups
snake_case_ : Optional[Any] = contrastive_logits_temperature
snake_case_ : Tuple = num_negatives
snake_case_ : Union[str, Any] = codevector_dim
snake_case_ : Any = proj_codevector_dim
snake_case_ : Any = diversity_loss_weight
# ctc loss
snake_case_ : str = ctc_loss_reduction
snake_case_ : int = ctc_zero_infinity
# adapter
snake_case_ : Any = add_adapter
snake_case_ : Optional[int] = adapter_kernel_size
snake_case_ : Dict = adapter_stride
snake_case_ : List[Any] = num_adapter_layers
snake_case_ : List[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case_ : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case_ : int = list(_lowercase )
snake_case_ : Optional[int] = list(_lowercase )
snake_case_ : int = list(_lowercase )
snake_case_ : Dict = xvector_output_dim
@property
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
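# Hedged example (assumes this class is exposed as `transformers.WavLMConfig`):
#
#   config = WavLMConfig(num_hidden_layers=6)  # a smaller-than-default stack
#   config.inputs_to_logits_ratio              # 320 == 5 * 2**6, the total conv stride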
| 58 |
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
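# Illustrative use of the decorator above (not part of the original module):
#
#   @LRUCache.decorator(100)
#   def fib(num: int) -> int:
#       return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#   fib(30)
#   print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)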
| 230 | 0 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
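# Why multiply by labels.shape[-1]: the model returns the *mean* cross-entropy
# per target token, so mean_loss * target_length recovers the total negative
# log-likelihood, which is the score the reference Mesh-TensorFlow (mtf)
# implementation reports; hence the name "mtf_score" and the negative sign.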
| 716 |
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 682 | 0 |
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 87 |
from __future__ import annotations


class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
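# Small demonstration of the class above (illustrative, not from the original
# module):
#
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()   # -2
#   m.add_row([5, 6])
#   print(m.order)    # (3, 2)
| 108 | 0 |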
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase=0 ):
"""simple docstring"""
snake_case__ : Optional[Any] = []
for old_item in old_list:
snake_case__ : Union[str, Any] = old_item.replace("""in_layers.0""" , """norm1""" )
snake_case__ : List[Any] = new_item.replace("""in_layers.2""" , """conv1""" )
snake_case__ : Tuple = new_item.replace("""out_layers.0""" , """norm2""" )
snake_case__ : Dict = new_item.replace("""out_layers.3""" , """conv2""" )
snake_case__ : int = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
snake_case__ : Optional[Any] = new_item.replace("""skip_connection""" , """conv_shortcut""" )
snake_case__ : str = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase=0 ):
"""simple docstring"""
snake_case__ : Dict = []
for old_item in old_list:
snake_case__ : Dict = old_item
snake_case__ : int = new_item.replace("""norm.weight""" , """group_norm.weight""" )
snake_case__ : str = new_item.replace("""norm.bias""" , """group_norm.bias""" )
snake_case__ : Optional[Any] = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
snake_case__ : Optional[Any] = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
snake_case__ : Optional[Any] = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ):
"""simple docstring"""
assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case__ : Union[str, Any] = old_checkpoint[path]
snake_case__ : Optional[int] = old_tensor.shape[0] // 3
snake_case__ : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case__ : Union[str, Any] = old_tensor.shape[0] // config["""num_head_channels"""] // 3
snake_case__ : Any = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case__ : List[str] = old_tensor.split(channels // num_heads , dim=1 )
snake_case__ : Union[str, Any] = query.reshape(snake_case__ )
snake_case__ : Tuple = key.reshape(snake_case__ )
snake_case__ : Any = value.reshape(snake_case__ )
for path in paths:
snake_case__ : List[Any] = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case__ : Union[str, Any] = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
snake_case__ : str = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
snake_case__ : Any = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case__ : int = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case__ : Dict = old_checkpoint[path["""old"""]][:, :, 0]
else:
snake_case__ : Optional[Any] = old_checkpoint[path["""old"""]]
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : int = {}
snake_case__ : Tuple = checkpoint["""time_embed.0.weight"""]
snake_case__ : List[str] = checkpoint["""time_embed.0.bias"""]
snake_case__ : List[str] = checkpoint["""time_embed.2.weight"""]
snake_case__ : Tuple = checkpoint["""time_embed.2.bias"""]
snake_case__ : Dict = checkpoint["""input_blocks.0.0.weight"""]
snake_case__ : List[Any] = checkpoint["""input_blocks.0.0.bias"""]
snake_case__ : List[Any] = checkpoint["""out.0.weight"""]
snake_case__ : Any = checkpoint["""out.0.bias"""]
snake_case__ : Any = checkpoint["""out.2.weight"""]
snake_case__ : List[str] = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
snake_case__ : List[str] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
snake_case__ : Any = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the middle blocks only
snake_case__ : Optional[int] = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
snake_case__ : Optional[int] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(snake_case__ )
}
# Retrieves the keys for the output blocks only
snake_case__ : str = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
snake_case__ : List[Any] = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(snake_case__ )
}
for i in range(1 , snake_case__ ):
snake_case__ : Union[str, Any] = (i - 1) // (config["""num_res_blocks"""] + 1)
snake_case__ : int = (i - 1) % (config["""num_res_blocks"""] + 1)
snake_case__ : List[str] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
snake_case__ : str = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
snake_case__ : Union[str, Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
snake_case__ : Dict = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
snake_case__ : Optional[int] = renew_resnet_paths(snake_case__ )
snake_case__ : int = {"""old""": f"""input_blocks.{i}.0""", """new""": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
snake_case__ : Tuple = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ )
if len(snake_case__ ):
snake_case__ : str = renew_attention_paths(snake_case__ )
snake_case__ : List[str] = {
"""old""": f"""input_blocks.{i}.1""",
"""new""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
snake_case__ : Optional[int] = {
f"""input_blocks.{i}.1.qkv.bias""": {
"""key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
"""key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , )
snake_case__ : int = middle_blocks[0]
snake_case__ : List[str] = middle_blocks[1]
snake_case__ : Any = middle_blocks[2]
snake_case__ : Dict = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
snake_case__ : Any = renew_resnet_paths(snake_case__ )
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ )
snake_case__ : Dict = renew_attention_paths(snake_case__ )
snake_case__ : Tuple = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ )
for i in range(snake_case__ ):
snake_case__ : Optional[Any] = i // (config["""num_res_blocks"""] + 1)
snake_case__ : Dict = i % (config["""num_res_blocks"""] + 1)
snake_case__ : List[str] = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]]
snake_case__ : Any = {}
for layer in output_block_layers:
snake_case__ , snake_case__ : Any = layer.split(""".""" )[0], shave_segments(snake_case__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(snake_case__ )
else:
snake_case__ : str = [layer_name]
if len(snake_case__ ) > 1:
snake_case__ : Dict = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
snake_case__ : List[str] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
snake_case__ : List[Any] = renew_resnet_paths(snake_case__ )
snake_case__ : int = renew_resnet_paths(snake_case__ )
snake_case__ : Optional[Any] = {"""old""": f"""output_blocks.{i}.0""", """new""": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case__ : str = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
snake_case__ : Any = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
snake_case__ : Optional[int] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(snake_case__ ) == 2:
snake_case__ : Any = []
if len(snake_case__ ):
snake_case__ : str = renew_attention_paths(snake_case__ )
snake_case__ : str = {
"""old""": f"""output_blocks.{i}.1""",
"""new""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
snake_case__ : int = {
f"""output_blocks.{i}.1.qkv.bias""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=snake_case__ , )
else:
snake_case__ : Optional[Any] = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case__ : Optional[Any] = """.""".join(["""output_blocks""", str(snake_case__ ), path["""old"""]] )
snake_case__ : Optional[int] = """.""".join(["""up_blocks""", str(snake_case__ ), """resnets""", str(snake_case__ ), path["""new"""]] )
snake_case__ : Any = checkpoint[old_path]
return new_checkpoint
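# Hedged, standalone sketch of the key bucketing used above: ".".join(key.split(".")[:2])
# reduces e.g. "input_blocks.3.0.op.weight" to "input_blocks.3", so the set counts
# distinct layers and the dict comprehension groups every key under its layer id.
def _bucket_input_block_keys_example():
    keys = ["input_blocks.0.0.weight", "input_blocks.1.0.op.weight", "input_blocks.1.1.qkv.bias"]
    num_layers = len({".".join(k.split(".")[:2]) for k in keys})
    buckets = {i: [k for k in keys if f"input_blocks.{i}" in k] for i in range(num_layers)}
    assert buckets[1] == ["input_blocks.1.0.op.weight", "input_blocks.1.1.qkv.bias"]
    return buckets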
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
lowerCAmelCase__ = json.loads(f.read())
lowerCAmelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
lowerCAmelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
lowerCAmelCase__ = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
lowerCAmelCase__ = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
lowerCAmelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 706 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCAmelCase__ ( UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
snake_case__ : Any = torch.load(UpperCAmelCase , map_location="""cpu""" )
snake_case__ : List[Any] = chkpt["""model"""]
# We have the base model one level deeper than the original XLM repository
snake_case__ : List[Any] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
snake_case__ : Union[str, Any] = v
else:
snake_case__ : str = v
snake_case__ : Optional[int] = chkpt["""params"""]
snake_case__ : List[Any] = {n: v for n, v in config.items() if not isinstance(UpperCAmelCase , (torch.FloatTensor, numpy.ndarray) )}
snake_case__ : Any = chkpt["""dico_word2id"""]
snake_case__ : Optional[int] = {s + """</w>""" if s.find("""@@""" ) == -1 and i > 13 else s.replace("""@@""" , """""" ): i for s, i in vocab.items()}
# Save pytorch-model
snake_case__ : str = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
snake_case__ : Optional[int] = pytorch_dump_folder_path + """/""" + CONFIG_NAME
snake_case__ : str = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""vocab_file"""]
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(UpperCAmelCase , UpperCAmelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , indent=2 ) + """\n""" )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCAmelCase , indent=2 ) + """\n""" )
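# Standalone sketch (hypothetical vocab content) of the vocab rewrite above: tokens
# past the special ids that carry no "@@" continuation marker get an explicit "</w>"
# end-of-word suffix, while "@@" markers are stripped from BPE continuations.
def _xlm_vocab_rewrite_example():
    vocab = {"<s>": 0, "hello": 14, "wor@@": 15, "ld": 16}
    rewritten = {
        s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i
        for s, i in vocab.items()
    }
    assert rewritten == {"<s>": 0, "hello</w>": 14, "wor": 15, "ld</w>": 16}
    return rewritten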
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCAmelCase__ = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 172 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_snake_case = logging.getLogger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, __a=-1):
'''simple docstring'''
_lowerCAmelCase : List[str] = label_idx
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if isinstance(__a, __a):
_lowerCAmelCase : Optional[Any] = mode.value
_lowerCAmelCase : Optional[Any] = os.path.join(__a, f"{mode}.txt")
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : Dict = []
with open(__a, encoding="utf-8") as f:
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Any = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=__a, labels=__a))
guid_index += 1
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Union[str, Any] = []
else:
_lowerCAmelCase : List[str] = line.split(" ")
words.append(splits[0])
if len(__a) > 1:
labels.append(splits[self.label_idx].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=__a, labels=__a))
return examples
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(__a)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_lowerCAmelCase : List[str] = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
writer.write(__a)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
def snake_case__ ( self, __a):
'''simple docstring'''
if path:
with open(__a, "r") as f:
_lowerCAmelCase : Union[str, Any] = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : Optional[Any] = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCAmelCase_ ( a):
def __init__( self):
'''simple docstring'''
super().__init__(label_idx=-2)
def snake_case__ ( self, __a):
'''simple docstring'''
if path:
with open(__a, "r") as f:
_lowerCAmelCase : List[Any] = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : List[str] = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCAmelCase_ ( a):
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if isinstance(__a, __a):
_lowerCAmelCase : str = mode.value
_lowerCAmelCase : str = os.path.join(__a, f"{mode}.txt")
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : Dict = []
with open(__a, encoding="utf-8") as f:
for sentence in parse_incr(__a):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = []
for token in sentence:
words.append(token["form"])
labels.append(token["upos"])
assert len(__a) == len(__a)
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=__a, labels=__a))
guid_index += 1
return examples
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = 0
for sentence in parse_incr(__a):
_lowerCAmelCase : str = preds_list[example_id]
_lowerCAmelCase : Any = ""
for token in sentence:
out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
out += "\n"
writer.write(__a)
example_id += 1
def snake_case__ ( self, __a):
'''simple docstring'''
if path:
with open(__a, "r") as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 500 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__()
self.register_modules(unet=__a, scheduler=__a)
@torch.no_grad()
def __call__( self, __a = 1, __a = 50, __a = None, __a = "pil", __a = True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.unet.config.sample_size
_lowerCAmelCase : Optional[Any] = (batch_size, 3, img_size, img_size)
_lowerCAmelCase : Any = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
_lowerCAmelCase : Union[str, Any] = randn_tensor(__a, generator=__a, device=self.device) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__a)
for t in self.progress_bar(self.scheduler.timesteps):
# here sigma_t == t_i from the paper
_lowerCAmelCase : Optional[Any] = self.scheduler.schedule[t]
_lowerCAmelCase : int = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
_lowerCAmelCase , _lowerCAmelCase : Dict = self.scheduler.add_noise_to_input(__a, __a, generator=__a)
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : Optional[int] = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
_lowerCAmelCase : Optional[int] = self.scheduler.step(__a, __a, __a, __a)
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
_lowerCAmelCase : List[str] = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
_lowerCAmelCase : List[str] = self.scheduler.step_correct(
__a, __a, __a, __a, step_output.prev_sample, step_output["derivative"], )
_lowerCAmelCase : Optional[int] = step_output.prev_sample
_lowerCAmelCase : Tuple = (sample / 2 + 0.5).clamp(0, 1)
_lowerCAmelCase : int = sample.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowerCAmelCase : int = self.numpy_to_pil(__a)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a)
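# Minimal sketch of the post-processing above: samples live in [-1, 1], so
# x / 2 + 0.5 maps them to [0, 1] before clamping and converting to images.
def _denormalize_sample_example():
    import torch

    sample = torch.tensor([-1.0, 0.0, 1.0, 1.5])
    image = (sample / 2 + 0.5).clamp(0, 1)
    assert image.tolist() == [0.0, 0.5, 1.0, 1.0]
    return image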
| 500 | 1 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "vocab.txt"}
UpperCAmelCase__ = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
UpperCAmelCase__ = {
"openbmb/cpm-ant-10b": 1024,
}
def _A( UpperCamelCase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
__lowercase = collections.OrderedDict()
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as reader:
__lowercase = reader.readlines()
for index, token in enumerate(UpperCamelCase__ ):
__lowercase = token.rstrip('''\n''' )
__lowercase = index
return vocab
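# Standalone sketch (hypothetical vocab lines) of load_vocab above: one token per
# line, index = line number, trailing newline stripped.
def _load_vocab_example():
    import collections

    lines = ["<pad>\n", "<unk>\n", "hello\n"]
    vocab = collections.OrderedDict()
    for index, token in enumerate(lines):
        vocab[token.rstrip("\n")] = index
    assert vocab["hello"] == 2
    return vocab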
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : int="<unk>" , lowerCamelCase__ : List[str]=200 ) -> int:
"""simple docstring"""
__lowercase = vocab
__lowercase = unk_token
__lowercase = max_input_chars_per_word
def UpperCAmelCase_ ( self : Tuple , lowerCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = list(lowerCamelCase__ )
if len(lowerCamelCase__ ) > self.max_input_chars_per_word:
return [self.unk_token]
__lowercase = 0
__lowercase = []
while start < len(lowerCamelCase__ ):
__lowercase = len(lowerCamelCase__ )
__lowercase = None
while start < end:
__lowercase = ''''''.join(chars[start:end] )
if substr in self.vocab:
__lowercase = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCamelCase__ )
__lowercase = end
return sub_tokens
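# Hedged, self-contained sketch of the greedy longest-match loop above: starting
# from the full remaining span, the end pointer shrinks until a substring is in
# the vocab; a character that matches nothing falls back to the unk token.
def _greedy_longest_match_example():
    vocab = {"un", "believ", "able"}
    chars, start, pieces = "unbelievable", 0, []
    while start < len(chars):
        end = len(chars)
        while end > start and chars[start:end] not in vocab:
            end -= 1
        if end == start:  # nothing matched: emit <unk> and advance one character
            pieces.append("<unk>")
            start += 1
        else:
            pieces.append(chars[start:end])
            start = end
    assert pieces == ["un", "believ", "able"]
    return pieces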
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase_ : List[str] = VOCAB_FILES_NAMES
UpperCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ : int = ['input_ids', 'attention_mask']
UpperCamelCase_ : List[Any] = False
def __init__( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : int="<d>" , lowerCamelCase__ : List[Any]="</d>" , lowerCamelCase__ : Optional[Any]="<s>" , lowerCamelCase__ : Optional[int]="</s>" , lowerCamelCase__ : Optional[Any]="<pad>" , lowerCamelCase__ : Dict="<unk>" , lowerCamelCase__ : List[str]="</n>" , lowerCamelCase__ : List[str]="</_>" , lowerCamelCase__ : Any="left" , **lowerCamelCase__ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=lowerCamelCase__ , eod_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , line_token=lowerCamelCase__ , space_token=lowerCamelCase__ , padding_side=lowerCamelCase__ , **lowerCamelCase__ , )
__lowercase = bod_token
__lowercase = eod_token
__lowercase = load_vocab(lowerCamelCase__ )
__lowercase = self.encoder[space_token]
__lowercase = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__lowercase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.encoder["\n"]
@property
def UpperCAmelCase_ ( self : str ) -> int:
"""simple docstring"""
return len(self.encoder )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase_ ( self : List[Any] , lowerCamelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = []
for x in jieba.cut(lowerCamelCase__ , cut_all=lowerCamelCase__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase__ ) )
return output_tokens
def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : str , **lowerCamelCase__ : int ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = [i for i in token_ids if i >= 0]
__lowercase = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase__ , **lowerCamelCase__ )
def UpperCAmelCase_ ( self : List[str] , lowerCamelCase__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
return token in self.encoder
def UpperCAmelCase_ ( self : int , lowerCamelCase__ : List[str] ) -> str:
"""simple docstring"""
return "".join(lowerCamelCase__ )
def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : Dict ) -> int:
"""simple docstring"""
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase_ ( self : Optional[int] , lowerCamelCase__ : Tuple ) -> Any:
"""simple docstring"""
return self.decoder.get(lowerCamelCase__ , self.unk_token )
def UpperCAmelCase_ ( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(lowerCamelCase__ ):
__lowercase = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
__lowercase = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
__lowercase = 0
if " " in self.encoder:
__lowercase = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
__lowercase = self.encoder['''\n''']
del self.encoder["\n"]
__lowercase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) )
with open(lowerCamelCase__ , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
''' Please check that the vocabulary is not corrupted!''' )
__lowercase = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def UpperCAmelCase_ ( self : Any , lowerCamelCase__ : List[int] , lowerCamelCase__ : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def UpperCAmelCase_ ( self : List[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ ))
return [1] + ([0] * len(lowerCamelCase__ ))
| 709 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase__ = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 362 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
a_ = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 417 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a_ = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 417 | 1 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
SCREAMING_SNAKE_CASE__ = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
SCREAMING_SNAKE_CASE__ = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
SCREAMING_SNAKE_CASE__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> dict[str, int]:
"""simple docstring"""
snake_case = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
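# Minimal, standalone sketch of get_letter_count above: uppercase the message and
# tally only the A-Z characters.
def _letter_count_example():
    message = "Hello!"
    tallied = {c: message.upper().count(c) for c in "HELO"}
    assert tallied == {"H": 1, "E": 1, "L": 2, "O": 1}
    return tallied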
def lowerCAmelCase__ ( _UpperCamelCase : tuple ) -> str:
"""simple docstring"""
return x[0]
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> str:
"""simple docstring"""
snake_case = get_letter_count(_UpperCamelCase )
snake_case = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(letter )
snake_case = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find , reverse=_UpperCamelCase )
snake_case = ''.join(freq_to_letter[freq] )
snake_case = list(freq_to_letter_str.items() )
freq_pairs.sort(key=get_item_at_index_zero , reverse=_UpperCamelCase )  # sort by the letter string, via the index-zero getter defined above (name reconstructed)
snake_case = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(_UpperCamelCase )
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> int:
"""simple docstring"""
snake_case = get_frequency_order(_UpperCamelCase )
snake_case = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
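# Hedged, self-contained sketch of the scoring rule above: +1 for each of the six
# most frequent letters shared with ETAOIN's head and +1 for each of the six least
# frequent shared with its tail, for a maximum score of 12.
def _match_score_rule_example():
    freq_order = "ETIANOSHRDLUCMWFGYPBVKXJQZ"  # hypothetical measured ordering
    score = sum(c in freq_order[:6] for c in "ETAOIN")
    score += sum(c in freq_order[-6:] for c in "VKJXQZ")
    assert score == 12
    return score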
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 | """simple docstring"""
import math
def lowerCAmelCase__ ( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
"""simple docstring"""
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_6_0:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(_UpperCamelCase ) ) ** 2)
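# Hedged usage sketch of Malus's law above: at 60 degrees, cos^2 gives a quarter,
# so 100 units of polarized light transmit 25.
def _malus_usage_example():
    import math

    intensity = 100.0 * (math.cos(math.radians(60.0)) ** 2)
    assert abs(intensity - 25.0) < 1e-9
    return intensity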
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 104 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class UpperCamelCase_ ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : int , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] = 1.0 , lowerCamelCase : str = None , ):
super().__init__()
lowerCamelCase_ : List[str] = initial_learning_rate
lowerCamelCase_ : str = warmup_steps
lowerCamelCase_ : Union[str, Any] = power
lowerCamelCase_ : Tuple = decay_schedule_fn
lowerCamelCase_ : List[str] = name
def __call__( self : str , lowerCamelCase : str ):
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowerCamelCase_ : Dict = tf.cast(lowerCamelCase , tf.floataa )
lowerCamelCase_ : Tuple = tf.cast(self.warmup_steps , tf.floataa )
lowerCamelCase_ : int = global_step_float / warmup_steps_float
lowerCamelCase_ : int = self.initial_learning_rate * tf.math.pow(lowerCamelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowerCamelCase , )
def __a ( self : Optional[Any] ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
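# Minimal sketch of the warmup value computed above for power=1 (linear warmup):
# at step s < warmup_steps the learning rate is init_lr * s / warmup_steps.
def _warmup_value_example():
    init_lr, warmup_steps, step = 2e-5, 100, 25
    lr = init_lr * (step / warmup_steps) ** 1.0
    assert abs(lr - 5e-6) < 1e-12
    return lr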
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = 0.9 ,lowerCAmelCase__ = 0.999 ,lowerCAmelCase__ = 1e-8 ,lowerCAmelCase__ = None ,lowerCAmelCase__ = None ,lowerCAmelCase__ = 0.0 ,lowerCAmelCase__ = 1.0 ,lowerCAmelCase__ = None ,):
lowerCamelCase_ : int = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=lowerCAmelCase__ ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=lowerCAmelCase__ ,)
if num_warmup_steps:
lowerCamelCase_ : Union[str, Any] = WarmUp(
initial_learning_rate=lowerCAmelCase__ ,decay_schedule_fn=lowerCAmelCase__ ,warmup_steps=lowerCAmelCase__ ,)
if weight_decay_rate > 0.0:
lowerCamelCase_ : Optional[Any] = AdamWeightDecay(
learning_rate=lowerCAmelCase__ ,weight_decay_rate=lowerCAmelCase__ ,beta_a=lowerCAmelCase__ ,beta_a=lowerCAmelCase__ ,epsilon=lowerCAmelCase__ ,clipnorm=lowerCAmelCase__ ,global_clipnorm=lowerCAmelCase__ ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=lowerCAmelCase__ ,)
else:
lowerCamelCase_ : Optional[Any] = tf.keras.optimizers.Adam(
learning_rate=lowerCAmelCase__ ,beta_a=lowerCAmelCase__ ,beta_a=lowerCAmelCase__ ,epsilon=lowerCAmelCase__ ,clipnorm=lowerCAmelCase__ ,global_clipnorm=lowerCAmelCase__ ,)
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class UpperCamelCase_ ( lowercase__ ):
def __init__( self : Tuple , lowerCamelCase : Optional[Any] = 0.001 , lowerCamelCase : List[str] = 0.9 , lowerCamelCase : Any = 0.999 , lowerCamelCase : Union[str, Any] = 1E-7 , lowerCamelCase : Optional[Any] = False , lowerCamelCase : Union[str, Any] = 0.0 , lowerCamelCase : Union[str, Any] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Dict = "AdamWeightDecay" , **lowerCamelCase : Optional[Any] , ):
super().__init__(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
lowerCamelCase_ : List[str] = weight_decay_rate
lowerCamelCase_ : Tuple = include_in_weight_decay
lowerCamelCase_ : Optional[int] = exclude_from_weight_decay
@classmethod
def __a ( cls : Tuple , lowerCamelCase : Union[str, Any] ):
lowerCamelCase_ : Union[str, Any] = {"WarmUp": WarmUp}
return super(lowerCamelCase , cls ).from_config(lowerCamelCase , custom_objects=lowerCamelCase )
def __a ( self : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : str ):
super(lowerCamelCase , self )._prepare_local(lowerCamelCase , lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : int = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def __a ( self : str , lowerCamelCase : List[Any] , lowerCamelCase : List[str] , lowerCamelCase : Dict ):
lowerCamelCase_ : Optional[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def __a ( self : Any , lowerCamelCase : List[Any] , lowerCamelCase : Tuple=None , **lowerCamelCase : Any ):
lowerCamelCase_ : Dict = list(zip(*lowerCamelCase ) )
return super(lowerCamelCase , self ).apply_gradients(zip(lowerCamelCase , lowerCamelCase ) , name=lowerCamelCase , **lowerCamelCase )
def __a ( self : List[str] , lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : Optional[int] ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowerCamelCase_ : Union[str, Any] = apply_state or {}
lowerCamelCase_ : Optional[int] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowerCamelCase_ : List[str] = self._fallback_apply_state(lowerCamelCase , lowerCamelCase )
lowerCamelCase_ : Dict = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def __a ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : int=None ):
lowerCamelCase_ : Tuple = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase )
lowerCamelCase_ : int = self._decay_weights_op(lowerCamelCase , lowerCamelCase , lowerCamelCase )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase , self )._resource_apply_dense(lowerCamelCase , lowerCamelCase , **lowerCamelCase )
def __a ( self : int , lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : str=None ):
lowerCamelCase_ : Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , lowerCamelCase )
lowerCamelCase_ : Optional[Any] = self._decay_weights_op(lowerCamelCase , lowerCamelCase , lowerCamelCase )
with tf.control_dependencies([decay] ):
return super(lowerCamelCase , self )._resource_apply_sparse(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase )
def __a ( self : Any ):
lowerCamelCase_ : Optional[Any] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def __a ( self : List[str] , lowerCamelCase : int ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(lowerCamelCase , lowerCamelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(lowerCamelCase , lowerCamelCase ) is not None:
return False
return True
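# Standalone sketch of the include/exclude regex filter above: exclusion patterns
# such as "LayerNorm" or "bias" veto weight decay for matching variable names.
def _weight_decay_filter_example():
    import re

    exclude = ["LayerNorm", "layer_norm", "bias"]
    name = "bert/encoder/layer_0/attention/output/LayerNorm/gamma"
    decays = not any(re.search(pattern, name) for pattern in exclude)
    assert decays is False
    return decays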
class UpperCamelCase_ ( lowercase__ ):
def __init__( self : Dict ):
lowerCamelCase_ : Dict = []
lowerCamelCase_ : List[Any] = None
@property
def __a ( self : List[str] ):
if self._accum_steps is None:
lowerCamelCase_ : Optional[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=lowerCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def __a ( self : str ):
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : int , lowerCamelCase : Optional[int] ):
if not self._gradients:
lowerCamelCase_ : int = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(lowerCamelCase ) , trainable=lowerCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(lowerCamelCase ) != len(self._gradients ):
raise ValueError(F"Expected {len(self._gradients )} gradients, but got {len(lowerCamelCase )}" )
for accum_gradient, gradient in zip(self._gradients , lowerCamelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(lowerCamelCase )
self._accum_steps.assign_add(1 )
def __a ( self : Any ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(lowerCamelCase ) )
| 364 |
'''simple docstring'''
from __future__ import annotations
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = order
# a_{0} ... a_{k}
UpperCAmelCase : Optional[int] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
UpperCAmelCase : List[Any] = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
UpperCAmelCase : Dict = [0.0] * self.order
# y[n-1] ... y[n-k]
UpperCAmelCase : Optional[Any] = [0.0] * self.order
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
if len(snake_case ) < self.order:
UpperCAmelCase : Dict = [1.0, *a_coeffs]
if len(snake_case ) != self.order + 1:
UpperCAmelCase : Optional[Any] = (
f"Expected a_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(snake_case )}"
)
raise ValueError(snake_case )
if len(snake_case ) != self.order + 1:
UpperCAmelCase : Optional[Any] = (
f"Expected b_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(snake_case )}"
)
raise ValueError(snake_case )
UpperCAmelCase : Optional[int] = a_coeffs
UpperCAmelCase : Optional[Any] = b_coeffs
def A_ ( self , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
UpperCAmelCase : Optional[int] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
UpperCAmelCase : List[str] = self.input_history[:-1]
UpperCAmelCase : List[Any] = self.output_history[:-1]
UpperCAmelCase : str = sample
UpperCAmelCase : str = result
return result
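# Hedged, self-contained sketch of one direct-form IIR step as computed above:
# y[n] = (b0 * x[n] + sum_i (b_i * x[n-i] - a_i * y[n-i])) / a0. With the default
# coefficients a = b = [1, 0, 0] the filter is the identity.
def _iir_identity_step_example():
    a_coeffs, b_coeffs = [1.0, 0.0, 0.0], [1.0, 0.0, 0.0]
    x_hist, y_hist = [0.0, 0.0], [0.0, 0.0]  # x[n-1], x[n-2] and y[n-1], y[n-2]
    sample = 1.0
    acc = sum(b_coeffs[i] * x_hist[i - 1] - a_coeffs[i] * y_hist[i - 1] for i in range(1, 3))
    result = (acc + b_coeffs[0] * sample) / a_coeffs[0]
    assert result == 1.0
    return result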
| 679 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
a_ = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
a_ = {
'vinai/phobert-base': 2_5_6,
'vinai/phobert-large': 2_5_6,
}
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : List[Any] = set()
__lowercase : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowercase : List[str] = char
__lowercase : Any = set(__UpperCamelCase )
return pairs
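# Minimal sketch of get_pairs above: the set of adjacent-symbol bigrams in a word.
def _get_pairs_example():
    word = ("l", "o", "w")
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    assert pairs == {("l", "o"), ("o", "w")}
    return pairs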
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =VOCAB_FILES_NAMES
UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , **UpperCamelCase_ , ) -> Dict:
super().__init__(
bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , )
__lowercase : Optional[Any] = vocab_file
__lowercase : Union[str, Any] = merges_file
__lowercase : Optional[Any] = {}
__lowercase : List[str] = 0
__lowercase : str = 1
__lowercase : Any = 2
__lowercase : Optional[int] = 3
self.add_from_file(UpperCamelCase_ )
__lowercase : List[str] = {v: k for k, v in self.encoder.items()}
with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle:
__lowercase : Any = merges_handle.read().split('''\n''' )[:-1]
__lowercase : Tuple = [tuple(merge.split()[:-1] ) for merge in merges]
__lowercase : List[Any] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) )
__lowercase : List[str] = {}
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase : Optional[Any] = [self.cls_token_id]
__lowercase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase_ )) + [1]
return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1]
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]:
__lowercase : Optional[int] = [self.sep_token_id]
__lowercase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCamelCase ( self ) -> str:
return len(self.encoder )
def _lowerCamelCase ( self ) -> str:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> int:
if token in self.cache:
return self.cache[token]
__lowercase : Union[str, Any] = tuple(UpperCamelCase_ )
__lowercase : Tuple = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
__lowercase : Union[str, Any] = get_pairs(UpperCamelCase_ )
if not pairs:
return token
while True:
__lowercase : int = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase ,__lowercase : Union[str, Any] = bigram
__lowercase : Optional[Any] = []
__lowercase : int = 0
while i < len(UpperCamelCase_ ):
try:
__lowercase : List[Any] = word.index(UpperCamelCase_ , UpperCamelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase : Tuple = j
if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase : Any = tuple(UpperCamelCase_ )
__lowercase : Optional[int] = new_word
if len(UpperCamelCase_ ) == 1:
break
else:
__lowercase : Tuple = get_pairs(UpperCamelCase_ )
__lowercase : Tuple = '''@@ '''.join(UpperCamelCase_ )
__lowercase : Optional[Any] = word[:-4]
__lowercase : Tuple = word
return word
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : List[str] = []
__lowercase : Optional[int] = re.findall(R'''\S+\n?''' , UpperCamelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(''' ''' ) ) )
return split_tokens
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]:
return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Any:
return self.decoder.get(UpperCamelCase_ , self.unk_token )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : str = ''' '''.join(UpperCamelCase_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase_ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase : str = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
__lowercase : Tuple = os.path.join(
UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.vocab_file , UpperCamelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCamelCase_ ):
copyfile(self.merges_file , UpperCamelCase_ )
return out_vocab_file, out_merge_file
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Any:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
try:
with open(UpperCamelCase_ , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(UpperCamelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F"""Incorrect encoding detected in {f}, please rebuild the dataset""" )
return
__lowercase : Optional[int] = f.readlines()
for lineTmp in lines:
__lowercase : Optional[Any] = lineTmp.strip()
__lowercase : Tuple = line.rfind(''' ''' )
if idx == -1:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' )
__lowercase : List[Any] = line[:idx]
__lowercase : Union[str, Any] = len(self.encoder )
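# Hedged, standalone sketch of one BPE merge step from the bpe() loop above: the
# lowest-ranked bigram is fused everywhere it occurs before pairs are recomputed.
def _bpe_merge_step_example():
    word, bigram = ("l", "o", "w", "e", "r"), ("l", "o")
    fused, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
            fused.append(word[i] + word[i + 1])
            i += 2
        else:
            fused.append(word[i])
            i += 1
    assert tuple(fused) == ("lo", "w", "e", "r")
    return tuple(fused)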
| 523 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCAmelCase ( __UpperCamelCase ):
for param in module.parameters():
__lowercase : Tuple = False
def __UpperCAmelCase ( ):
__lowercase : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
__lowercase : List[Any] = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : List[Any] = plt.imshow(__UpperCamelCase )
fig.axes.get_xaxis().set_visible(__UpperCamelCase )
fig.axes.get_yaxis().set_visible(__UpperCamelCase )
plt.show()
def __UpperCAmelCase ( ):
__lowercase : Optional[Any] = datetime.now()
__lowercase : Optional[Any] = current_time.strftime('''%H:%M:%S''' )
return timestamp
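# Minimal sketch of the device pick above: prefer CUDA when available, otherwise
# CPU (the original also probes MPS but then warns against using it).
def _device_pick_example():
    import torch

    device = "cuda" if torch.cuda.is_available() else "cpu"
    assert device in ("cuda", "cpu")
    return device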
| 523 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase_ = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=8 ):
snake_case_ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
snake_case_ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
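# Standalone sketch of downscale_height_and_width above with the default
# scale_factor=8: pixel dimensions are divided by 8**2 = 64, rounded up, then
# reported back multiplied by the scale factor.
def _downscale_example():
    height, width, scale_factor = 768, 768, 8
    new_h = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_w = width // scale_factor**2 + (width % scale_factor**2 != 0)
    assert (new_h * scale_factor, new_w * scale_factor) == (96, 96)
    return new_h * scale_factor, new_w * scale_factor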
class snake_case_ ( __A ):
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCamelCase : UNetaDConditionModel , _UpperCamelCase : DDPMScheduler , _UpperCamelCase : VQModel , ) ->Any:
super().__init__()
self.register_modules(
unet=_UpperCamelCase , scheduler=_UpperCamelCase , movq=_UpperCamelCase , )
snake_case_ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case__( self : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str , _UpperCamelCase : Tuple ) ->Optional[Any]:
if latents is None:
snake_case_ = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
snake_case_ = latents.to(_UpperCamelCase )
snake_case_ = latents * scheduler.init_noise_sigma
return latents
def snake_case__( self : List[Any] , _UpperCamelCase : Union[str, Any]=0 ) ->Optional[int]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case_ = torch.device(f'''cuda:{gpu_id}''' )
snake_case_ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
def snake_case__( self : str , _UpperCamelCase : List[str]=0 ) ->Dict:
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
snake_case_ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=_UpperCamelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
snake_case_ = None
for cpu_offloaded_model in [self.unet, self.movq]:
snake_case_, snake_case_ = cpu_offload_with_hook(_UpperCamelCase , _UpperCamelCase , prev_module_hook=_UpperCamelCase )
# We'll offload the last model manually.
snake_case_ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case__( self : List[str] ) ->Union[str, Any]:
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_UpperCamelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
    def __call__( self , image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds : Union[torch.FloatTensor, List[torch.FloatTensor]] , height : int = 5_1_2 , width : int = 5_1_2 , num_inference_steps : int = 1_0_0 , guidance_scale : float = 4.0 , num_images_per_prompt : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ) ->Optional[Any]:
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                variance_pred_uncond , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 39 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests( PipelineKarrasSchedulerTesterMixin , PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = AltDiffusionPipeline
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components( self ) ->int:
torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
        tokenizer.model_max_length = 7_7
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs( self , device , seed=0 ) ->Any:
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_attention_slicing_forward_pass( self ) ->List[str]:
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical( self ) ->Any:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_alt_diffusion_ddim( self ) ->Any:
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components['''text_encoder'''] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['''prompt'''] = '''A photo of an astronaut'''
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_pndm( self ) ->Union[str, Any]:
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['''scheduler'''] = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config )
        components['''text_encoder'''] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components )
        alt_pipe = alt_pipe.to(device )
        alt_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = alt_pipe(**inputs )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ) ->List[str]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_alt_diffusion( self ) ->Tuple:
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , guidance_scale=6.0 , num_inference_steps=2_0 , output_type='''np''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_alt_diffusion_fast_ddim( self ) ->Optional[Any]:
        scheduler = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
        alt_pipe = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=scheduler , safety_checker=None )
        alt_pipe = alt_pipe.to(torch_device )
        alt_pipe.set_progress_bar_config(disable=None )
        prompt = '''A painting of a squirrel eating a burger'''
        generator = torch.manual_seed(0 )
        output = alt_pipe([prompt] , generator=generator , num_inference_steps=2 , output_type='''numpy''' )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 39 | 1 |
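The tests above all follow the same corner-slice regression pattern; here is a hedged, self-contained sketch of that check. `assert_image_slice_close` is a hypothetical helper, not part of the test suite.

import numpy as np

def assert_image_slice_close(image: np.ndarray, expected_slice: np.ndarray, tol: float = 1e-2) -> None:
    # image has shape (batch, height, width, channels); compare the bottom-right 3x3
    # patch of the last channel against stored reference values
    image_slice = image[0, -3:, -3:, -1]
    assert np.abs(image_slice.flatten() - expected_slice).max() < tol

rng = np.random.default_rng(0)
img = rng.random((1, 64, 64, 3))
assert_image_slice_close(img, img[0, -3:, -3:, -1].flatten())  # trivially passes against itself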
def least_divisible_repunit(divisor: int) -> int:
    '''Return the least k such that the repunit R(k) = (10**k - 1) / 9 is divisible by divisor (0 if none exists).'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1_000_000) -> int:
    '''Return the least value of n for which A(n) first exceeds limit.'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(F'''{solution() = }''')
| 3 |
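A small worked example for the repunit helper above (Project Euler 129 notation: A(n) is the least k with n | R(k)), assuming the repaired names and run in the same module. The values are easy to verify by hand.

assert least_divisible_repunit(7) == 6    # R(6) = 111111 = 7 * 15873, and no shorter repunit works
assert least_divisible_repunit(41) == 5   # R(5) = 11111 = 41 * 271
assert least_divisible_repunit(10) == 0   # gcd(10, 10) > 1, so no repunit is ever divisible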
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
_UpperCAmelCase : List[str] = None
_UpperCAmelCase : Any = logging.get_logger(__name__)
_UpperCAmelCase : Tuple = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[str] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
},
}
_UpperCAmelCase : Optional[int] = {
"camembert-base": 512,
}
_UpperCAmelCase : Union[str, Any] = "▁"
class CamembertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , **kwargs , ) -> None:
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 3 | 1 |
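An illustrative sketch (not part of the file) of the sequence-pair layout produced by build_inputs_with_special_tokens above, using CamemBERT's <s>/</s> conventions. The token ids are made up for readability.

cls_id, sep_id = 5, 6           # hypothetical ids for <s> and </s>
tokens_a = [100, 101]           # hypothetical ids for sentence A
tokens_b = [200]                # hypothetical ids for sentence B
pair = [cls_id] + tokens_a + [sep_id] + [sep_id] + tokens_b + [sep_id]
assert pair == [5, 100, 101, 6, 6, 200, 6]   # <s> A </s></s> B </s>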
'''simple docstring'''
from __future__ import annotations
def binary_search(v: list[int], key: int, l: int, r: int) -> int:  # noqa: E741
    """Leftmost index m in (l, r] with v[m] >= key, assuming v is sorted on that range."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence of v, in O(n log n)."""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value: it starts a fresh length-1 candidate
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest candidate found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] tightens an existing candidate of intermediate length
            tail[binary_search(tail, v[i], -1, length - 1)] = v[i]
    return length
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 26 |
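A quick usage check for the O(n log n) routine above, assuming the repaired names and run in the same module. The expected answers are easy to verify by hand.

assert longest_increasing_subsequence_length([10, 22, 9, 33, 21, 50, 41, 60, 80]) == 6  # 10,22,33,50,60,80
assert longest_increasing_subsequence_length([5, 4, 3, 2, 1]) == 1
assert longest_increasing_subsequence_length([]) == 0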
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key( name ) -> str:
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks" , "layers" )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn" , "self_attn" )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj" , "out_proj" )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
    if "norm1" in name:
        name = name.replace("norm1" , "layer_norm1" )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2" , "layer_norm2" )
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm" , "vision_model.layernorm" )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
    if "ln_1" in name:
        name = name.replace("ln_1" , "layer_norm1" )
    if "ln_2" in name:
        name = name.replace("ln_2" , "layer_norm2" )
    if "c_fc" in name:
        name = name.replace("c_fc" , "fc1" )
    if "c_proj" in name:
        name = name.replace("c_proj" , "fc2" )
    if "text_encoder" in name:
        name = name.replace("text_encoder" , "text_model" )
    if "ln_final" in name:
        name = name.replace("ln_final" , "final_layer_norm" )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden." , "visual_projection." )
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out." , "visual_projection.3." )
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden" , "text_projection" )
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out" , "text_projection.3" )
    return name
def convert_state_dict( orig_state_dict , config ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            stage_num , layer_num = int(key_split[2] ), int(key_split[4] )
            dim = config.vision_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.weight'''] = val[:dim, :]
                orig_state_dict[f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.weight'''] = val[dim : dim * 2, :]
                orig_state_dict[f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.q_proj.bias'''] = val[:dim]
                orig_state_dict[f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.k_proj.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn.v_proj.bias'''] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split("." )
            layer_num = int(key_split[3] )
            dim = config.text_config.hidden_size
            if "weight" in key:
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight'''] = val[:dim, :]
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight'''] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight'''] = val[-dim:, :]
            else:
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias'''] = val[:dim]
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias'''] = val[dim : dim * 2]
                orig_state_dict[f'''text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias'''] = val[-dim:]
        else:
            new_name = rename_key(key )
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val
    return orig_state_dict
def prepare_img( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint( checkpoint_path , pytorch_dump_folder_path , model_name="groupvit-gcc-yfcc" , push_to_hub=False ):
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["model"]
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"] , images=image , padding=True , return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(F'''Model name {model_name} not supported.''' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )
    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print("Successfully saved processor and model to" , pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        processor.push_to_hub(model_name , organization="nielsr" )
        model.push_to_hub(model_name , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 245 | 0 |
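A hedged, self-contained sketch of the fused-QKV split performed in convert_state_dict above: a single (3*dim, dim) projection matrix is cut into equal query/key/value blocks along dim 0. The sizes are hypothetical.

import torch

dim = 8                              # hypothetical hidden size
qkv_weight = torch.randn(3 * dim, dim)
q = qkv_weight[:dim, :]              # rows 0 .. dim-1
k = qkv_weight[dim : dim * 2, :]     # rows dim .. 2*dim-1
v = qkv_weight[-dim:, :]             # rows 2*dim .. 3*dim-1
assert torch.equal(torch.cat([q, k, v], dim=0), qkv_weight)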
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    '''simple docstring'''
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["stage1", "stage2", "stage3", "stage4"] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys( config ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.stages.{i}.downsample.reduction.weight", F"backbone.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.weight", F"backbone.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.stages.{i}.downsample.norm.bias", F"backbone.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") )
rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
    '''simple docstring'''
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" )
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[: dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim :]
            # fmt: on
def correct_unfold_reduction_order( x ):
    '''simple docstring'''
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order( x ):
    '''simple docstring'''
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order( x ):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order( x ):
    '''simple docstring'''
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    '''simple docstring'''
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" , file_name=model_name )[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("bn" , "batch_norm" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url , stream=True ).raw ).convert("RGB" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="pt" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print(logits.shape )
    print("First values of logits:" , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.19_21, -7.19_21, -6.95_32], [-7.19_21, -7.19_21, -6.95_32], [-7.09_08, -7.09_08, -6.85_34]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.58_51, -6.58_51, -6.43_30], [-6.58_51, -6.58_51, -6.43_30], [-6.47_63, -6.47_63, -6.32_54]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.52_97, -7.52_97, -7.38_02], [-7.52_97, -7.52_97, -7.38_02], [-7.40_44, -7.40_44, -7.25_86]] )
    print("Logits:" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"Saving processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"Pushing model and processor for {model_name} to hub" )
        model.push_to_hub(F"openmmlab/{model_name}" )
        processor.push_to_hub(F"openmmlab/{model_name}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 276 |
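A small, runnable demo of the channel reordering repaired by correct_unfold_norm_order above: Swin's patch merging concatenates four neighbours, and the two checkpoint formats disagree on that order, so groups are permuted with the index pattern [0, 2, 1, 3]. The tensor here is a toy stand-in for a norm weight.

import torch

x = torch.arange(8)                        # pretend 1-D norm weight over 4 groups of 2
reordered = x.reshape(4, 2)[[0, 2, 1, 3], :].transpose(0, 1).reshape(8)
print(reordered.tolist())                  # [0, 4, 2, 6, 1, 5, 3, 7]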
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf( model : BertModel , ckpt_dir : str , model_name : str ):
    '''simple docstring'''
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()
    def to_tf_var_name(name : str ):
        for patt, repl in iter(var_map ):
            name = name.replace(patt , repl )
        return F"bert/{name}"
    def create_tf_var(tensor : np.ndarray , name : str , session : tf.Session ):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype , shape=tensor.shape , name=name , initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var
    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor , name=tf_name , session=session )
            tf.keras.backend.set_value(tf_var , torch_tensor )
            tf_weight = session.run(tf_var )
            print(F"Successfully created {tf_name}: {np.allclose(tf_weight , torch_tensor )}" )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session , os.path.join(ckpt_dir , model_name.replace("-" , "_" ) + ".ckpt" ) )
def main( raw_args=None ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" , type=str , required=True , help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" , type=str , default=None , required=False , help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" , type=str , required=True , help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" , type=str , required=True , help="Directory in which to save tensorflow model" )
    args = parser.parse_args(raw_args )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
    convert_pytorch_checkpoint_to_tf(model=model , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
| 276 | 1 |
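A hedged sketch of why `tensors_to_transpose` exists above: torch.nn.Linear stores its weight as (out_features, in_features), while a TF dense kernel is laid out (in_features, out_features), so dense weights must be transposed during conversion. The layer sizes are arbitrary.

import numpy as np
import torch

linear = torch.nn.Linear(3, 5)
x = np.ones((1, 3), dtype=np.float32)
tf_kernel = linear.weight.detach().numpy().T            # (in, out): the TF layout
y_tf_style = x @ tf_kernel + linear.bias.detach().numpy()
y_pt = linear(torch.from_numpy(x)).detach().numpy()
assert np.allclose(y_pt, y_tf_style, atol=1e-6)         # same result in either layout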
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                '''You have to specify at least one text or query image or image. All three cannot be none.''' )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [''' '''] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('''Target return tensor type could not be returned''' )
            encoding = BatchEncoding()
            encoding['''input_ids'''] = input_ids
            encoding['''attention_mask'''] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['''query_pixel_values'''] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 668 |
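A hedged, self-contained sketch of the per-image query padding done in __call__ above: every image's text queries are padded with " " so the whole batch can be tokenized to a rectangular shape. The query strings are hypothetical.

text = [["a cat", "a dog", "a bird"], ["a remote"]]   # nested queries, uneven lengths
max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
assert padded == [["a cat", "a dog", "a bird"], ["a remote", " ", " "]]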
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
_UpperCAmelCase : List[Any] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
_UpperCAmelCase : Union[str, Any] = {
"camembert-base": 512,
}
_UpperCAmelCase : Dict = "▁"
class CamembertTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"] , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids )
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size( self ):
        return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def _tokenize( self , text ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token ) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token )
    def _convert_id_to_token( self , index ):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def __getstate__( self ):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
| 668 | 1 |
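A hedged sketch of the id layout handled by the fairseq offset above: ids 0-3 are reserved control tokens, so every SentencePiece id is shifted up by the offset, and SentencePiece's own unknown id (0) is redirected to the fairseq <unk>. The helper name is hypothetical.

fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
fairseq_offset = len(fairseq_tokens_to_ids)  # 4

def sp_id_to_model_id(sp_piece_id: int) -> int:
    # id 0 is SentencePiece's <unk>; everything else moves up by the offset
    return 3 if sp_piece_id == 0 else sp_piece_id + fairseq_offset

assert sp_id_to_model_id(0) == 3    # unknown pieces map to the fairseq <unk>
assert sp_id_to_model_id(10) == 14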
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
UpperCAmelCase : Optional[int] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig (PretrainedConfig ):
    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=58_101 , decoder_vocab_size=None , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=58_100 , scale_embedding=False , pad_token_id=58_100 , eos_token_id=0 , forced_eos_token_id=0 , share_encoder_decoder_embeddings=True , **kwargs , ) -> Tuple:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
class MarianOnnxConfig (OnnxSeqaSeqConfigWithPast ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case : int = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_snake_case : Dict = {0: '''batch'''}
_snake_case : Dict = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
_snake_case : List[str] = {0: '''batch''', 1: '''decoder_sequence'''}
_snake_case : Dict = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(A__ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_snake_case : Tuple = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
_snake_case , _snake_case : Optional[Any] = self.num_layers
for i in range(A__ ):
_snake_case : Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''}
_snake_case : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
_snake_case : str = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case : Union[str, Any] = super().outputs
else:
_snake_case : List[str] = super(A__ , self ).outputs
if self.use_past:
_snake_case , _snake_case : int = self.num_layers
for i in range(A__ ):
_snake_case : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''}
_snake_case : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_snake_case : Dict = self._generate_dummy_inputs_for_encoder_and_decoder(
A__ , A__ , A__ , A__ , A__ )
# Generate decoder inputs
_snake_case : Dict = seq_length if not self.use_past else 1
_snake_case : List[str] = self._generate_dummy_inputs_for_encoder_and_decoder(
A__ , A__ , A__ , A__ , A__ )
_snake_case : str = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
_snake_case : Optional[int] = dict(**A__ , **A__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_snake_case , _snake_case : Dict = common_inputs['''input_ids'''].shape
_snake_case : Dict = common_inputs['''decoder_input_ids'''].shape[1]
_snake_case , _snake_case : Optional[Any] = self.num_attention_heads
_snake_case : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case : List[str] = decoder_seq_length + 3
_snake_case : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_snake_case : Any = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(A__ , A__ )] , dim=1 )
_snake_case : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_snake_case , _snake_case : Tuple = self.num_layers
_snake_case : int = min(A__ , A__ )
_snake_case : str = max(A__ , A__ ) - min_num_layers
_snake_case : str = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(A__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(A__ ),
torch.zeros(A__ ),
torch.zeros(A__ ),
torch.zeros(A__ ),
) )
# TODO: test this.
_snake_case : Tuple = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(A__ , A__ ):
common_inputs["past_key_values"].append((torch.zeros(A__ ), torch.zeros(A__ )) )
return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_snake_case : Optional[int] = self._generate_dummy_inputs_for_encoder_and_decoder(
A__ , A__ , A__ , A__ , A__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
_snake_case , _snake_case : List[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
_snake_case : Union[str, Any] = seqlen + 2
_snake_case , _snake_case : List[str] = self.num_layers
_snake_case , _snake_case : List[Any] = self.num_attention_heads
_snake_case : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_snake_case : Dict = common_inputs['''attention_mask'''].dtype
_snake_case : Union[str, Any] = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(A__ , A__ , dtype=A__ )] , dim=1 )
_snake_case : Optional[int] = [
(torch.zeros(A__ ), torch.zeros(A__ )) for _ in range(A__ )
]
return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ) -> Mapping[str, Any]:
"""simple docstring"""
_snake_case : Optional[Any] = compute_effective_axis_dimension(
A__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_snake_case : str = tokenizer.num_special_tokens_to_add(A__ )
_snake_case : Tuple = compute_effective_axis_dimension(
A__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A__ )
# Generate dummy inputs according to compute batch and sequence
_snake_case : Union[str, Any] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
_snake_case : Union[str, Any] = dict(tokenizer(A__ , return_tensors=A__ ) )
return common_inputs
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = -1 , lowercase__ = -1 , lowercase__ = False , lowercase__ = None , ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case : Tuple = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
A__ , batch_size=A__ , seq_length=A__ , is_pair=A__ , framework=A__ )
else:
_snake_case : Any = self._generate_dummy_inputs_for_causal_lm(
A__ , batch_size=A__ , seq_length=A__ , is_pair=A__ , framework=A__ )
return common_inputs
def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> List[str]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
_snake_case : Tuple = super()._flatten_past_key_values_(A__ , A__ , A__ , A__ )
else:
_snake_case : Tuple = super(A__ , self )._flatten_past_key_values_(
A__ , A__ , A__ , A__ )
@property
def UpperCAmelCase_ ( self ) -> float:
"""simple docstring"""
return 1E-4
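# Hedged usage sketch (not in the original file; names follow the standard
# transformers OnnxConfig API that this obfuscated seq2seq config appears to
# mirror; `MyOnnxConfig` is a hypothetical subclass name):
#   onnx_config = MyOnnxConfig(model.config, task="seq2seq-lm", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2,
#                                             seq_length=8, framework="pt")
#   # dummy["past_key_values"] then holds zero tensors shaped
#   # (batch, num_heads, past_length, hidden_size // num_heads), as built above.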
| 700 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Dict = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class lowerCamelCase (a__ ):
_lowercase : List[str] = """sew-d"""
def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict:
"""simple docstring"""
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ )
_snake_case : List[str] = hidden_size
_snake_case : Optional[Any] = feat_extract_norm
_snake_case : Tuple = feat_extract_activation
_snake_case : Tuple = list(lowercase__ )
_snake_case : Any = list(lowercase__ )
_snake_case : Any = list(lowercase__ )
_snake_case : Any = conv_bias
_snake_case : List[Any] = num_conv_pos_embeddings
_snake_case : Any = num_conv_pos_embedding_groups
_snake_case : Union[str, Any] = len(self.conv_dim )
_snake_case : Optional[Any] = num_hidden_layers
_snake_case : Optional[int] = intermediate_size
_snake_case : Any = squeeze_factor
_snake_case : Optional[Any] = max_position_embeddings
_snake_case : Tuple = position_buckets
_snake_case : Tuple = share_att_key
_snake_case : Any = relative_attention
_snake_case : Optional[int] = norm_rel_ebd
_snake_case : Optional[Any] = list(lowercase__ )
_snake_case : List[Any] = hidden_act
_snake_case : List[Any] = num_attention_heads
_snake_case : Dict = hidden_dropout
_snake_case : Tuple = attention_dropout
_snake_case : Union[str, Any] = activation_dropout
_snake_case : List[Any] = feat_proj_dropout
_snake_case : Optional[int] = final_dropout
_snake_case : Optional[Any] = layer_norm_eps
_snake_case : Dict = feature_layer_norm_eps
_snake_case : List[Any] = initializer_range
_snake_case : Dict = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_snake_case : Union[str, Any] = apply_spec_augment
_snake_case : Any = mask_time_prob
_snake_case : List[str] = mask_time_length
_snake_case : Dict = mask_time_min_masks
_snake_case : Union[str, Any] = mask_feature_prob
_snake_case : Tuple = mask_feature_length
_snake_case : Union[str, Any] = mask_feature_min_masks
# ctc loss
_snake_case : Optional[Any] = ctc_loss_reduction
_snake_case : Optional[Any] = ctc_zero_infinity
# sequence classification
_snake_case : List[Any] = use_weighted_layer_sum
_snake_case : Any = classifier_proj_size
@property
def UpperCAmelCase_ ( self ) -> Any:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
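# Worked example (arithmetic taken from the property above): with the default
# conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the running product is
# 5 * 2**6 = 320, i.e. one output frame per 320 raw audio samples. In the
# un-obfuscated config this property is `inputs_to_logits_ratio` (an assumption
# based on the standard SEW-D config, not stated in this file).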
| 47 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = botoa.client('''iam''' )
__SCREAMING_SNAKE_CASE : Tuple = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowercase__ , AssumeRolePolicyDocument=json.dumps(lowercase__ , indent=2 ) )
__SCREAMING_SNAKE_CASE : str = {
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowercase__ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(lowercase__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = botoa.client('''iam''' )
return iam_client.get_role(RoleName=lowercase__ )["Role"]["Arn"]
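# Pairing sketch for the two helpers above (assuming their original,
# un-obfuscated names from accelerate's SageMaker config flow; the role name is
# illustrative):
#   _create_iam_role_for_sagemaker("accelerate_sagemaker_execution_role")
#   role_arn = _get_iam_role_arn("accelerate_sagemaker_execution_role")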
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Tuple = _ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , lowercase__ , )
__SCREAMING_SNAKE_CASE : str = None
if credentials_configuration == 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
__SCREAMING_SNAKE_CASE : Dict = aws_profile
else:
print(
'''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
__SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field('''AWS Access Key ID: ''' )
__SCREAMING_SNAKE_CASE : Tuple = aws_access_key_id
__SCREAMING_SNAKE_CASE : str = _ask_field('''AWS Secret Access Key: ''' )
__SCREAMING_SNAKE_CASE : List[str] = aws_secret_access_key
__SCREAMING_SNAKE_CASE : Tuple = _ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
__SCREAMING_SNAKE_CASE : List[Any] = aws_region
__SCREAMING_SNAKE_CASE : List[str] = _ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , lowercase__ , )
if role_management == 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field('''Enter your IAM role name: ''' )
else:
__SCREAMING_SNAKE_CASE : Tuple = '''accelerate_sagemaker_execution_role'''
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(lowercase__ )
__SCREAMING_SNAKE_CASE : Dict = _ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : List[str] = None
if is_custom_docker_image:
__SCREAMING_SNAKE_CASE : Dict = _ask_field('''Enter your Docker image: ''' , lambda lowercase__ : str(lowercase__ ).lower() )
__SCREAMING_SNAKE_CASE : Any = _ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : Dict = None
if is_sagemaker_inputs_enabled:
__SCREAMING_SNAKE_CASE : Optional[int] = _ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda lowercase__ : str(lowercase__ ).lower() , )
__SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : Any = None
if is_sagemaker_metrics_enabled:
__SCREAMING_SNAKE_CASE : Dict = _ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda lowercase__ : str(lowercase__ ).lower() , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
__SCREAMING_SNAKE_CASE : List[str] = {}
__SCREAMING_SNAKE_CASE : int = _ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
if use_dynamo:
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''dynamo_'''
__SCREAMING_SNAKE_CASE : Optional[Any] = _ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
__SCREAMING_SNAKE_CASE : List[str] = _ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
if use_custom_options:
__SCREAMING_SNAKE_CASE : Any = _ask_options(
'''Which mode do you want to use?''' , lowercase__ , lambda lowercase__ : TORCH_DYNAMO_MODES[int(lowercase__ )] , default='''default''' , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(
'''Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : str = _ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=lowercase__ , error_message='''Please enter yes or no.''' , )
__SCREAMING_SNAKE_CASE : Any = '''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
__SCREAMING_SNAKE_CASE : List[str] = _ask_options(
lowercase__ , lowercase__ , lambda lowercase__ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowercase__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__SCREAMING_SNAKE_CASE : Union[str, Any] = _ask_field(lowercase__ , lambda lowercase__ : str(lowercase__ ).lower() , default='''ml.p3.2xlarge''' )
__SCREAMING_SNAKE_CASE : int = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__SCREAMING_SNAKE_CASE : Optional[Any] = _ask_field(
'''How many machines do you want use? [1]: ''' , lowercase__ , default=1 , )
__SCREAMING_SNAKE_CASE : List[str] = _ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=lowercase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowercase__ , use_cpu=lowercase__ , dynamo_config=lowercase__ , eca_instance_type=lowercase__ , profile=lowercase__ , region=lowercase__ , iam_role_name=lowercase__ , mixed_precision=lowercase__ , num_machines=lowercase__ , sagemaker_inputs_file=lowercase__ , sagemaker_metrics_file=lowercase__ , )
| 696 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
__SCREAMING_SNAKE_CASE : Any = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : Optional[int] = d_kv
__SCREAMING_SNAKE_CASE : Tuple = d_ff
__SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
else:
__SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
__SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers
__SCREAMING_SNAKE_CASE : List[Any] = num_heads
__SCREAMING_SNAKE_CASE : List[Any] = num_experts
__SCREAMING_SNAKE_CASE : Tuple = expert_capacity
__SCREAMING_SNAKE_CASE : List[Any] = router_bias
__SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' )
__SCREAMING_SNAKE_CASE : List[Any] = router_dtype
__SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens
__SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance
__SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate
__SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_factor
__SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj
__SCREAMING_SNAKE_CASE : Any = use_cache
__SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs
__SCREAMING_SNAKE_CASE : int = router_z_loss_coef
__SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef
__SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
__SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1]
__SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated'''
if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new'''
super().__init__(
pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
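# Worked example of the sparse-step logic above: with num_layers=12 and
# num_sparse_encoder_layers=3, every 12 // 3 = 4th encoder layer is a sparse
# (mixture-of-experts) layer; with num_sparse_encoder_layers=0 the step is set
# to num_layers, which per the inline comment yields no sparse layers.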
| 696 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
snake_case__ = datasets.utils.logging.get_logger(__name__)
class UpperCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
A_ = None
A_ = None
class UpperCamelCase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
A_ = datasets.Audio()
A_ = 'audio'
A_ = AudioFolderConfig
A_ = 42 # definition at the bottom of the script
A_ = AudioClassification(audio_column='audio' , label_column='label' )
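# Hedged usage sketch (standard `datasets` loader API; not defined in this
# module): a class-labelled directory of audio files can be loaded with
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="path/to/folder")
# yielding "audio" and "label" columns per the task template configured above.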
snake_case__ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
snake_case__ = AUDIO_EXTENSIONS
| 638 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , A_ , A_=7 , A_=3 , A_=30 , A_=4_00 , A_=True , A_=None , A_=True , A_=[0.5, 0.5, 0.5] , A_=[0.5, 0.5, 0.5] , A_=True , A_=1 / 2_55 , A_=True , ) -> List[Any]:
"""simple docstring"""
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_lowerCamelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = num_channels
_lowerCamelCase = min_resolution
_lowerCamelCase = max_resolution
_lowerCamelCase = do_resize
_lowerCamelCase = size
_lowerCamelCase = do_normalize
_lowerCamelCase = image_mean
_lowerCamelCase = image_std
_lowerCamelCase = do_rescale
_lowerCamelCase = rescale_factor
_lowerCamelCase = do_pad
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase_ ( self , A_ , A_=False ) -> List[str]:
"""simple docstring"""
if not batched:
_lowerCamelCase = image_inputs[0]
if isinstance(A_ , Image.Image ):
_lowerCamelCase , _lowerCamelCase = image.size
else:
_lowerCamelCase , _lowerCamelCase = image.shape[1], image.shape[2]
if w < h:
_lowerCamelCase = int(self.size['''shortest_edge'''] * h / w )
_lowerCamelCase = self.size['''shortest_edge''']
elif w > h:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = int(self.size['''shortest_edge'''] * w / h )
else:
_lowerCamelCase = self.size['''shortest_edge''']
_lowerCamelCase = self.size['''shortest_edge''']
else:
_lowerCamelCase = []
for image in image_inputs:
_lowerCamelCase , _lowerCamelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_lowerCamelCase = max(A_ , key=lambda A_ : item[0] )[0]
_lowerCamelCase = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
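    # Worked example of the resize rule above, with shortest_edge = 18: an
    # image of height 400 and width 30 has w < h, so width -> 18 and
    # height -> int(18 * 400 / 30) = 240, preserving aspect ratio up to rounding.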
@require_torch
@require_vision
class UpperCamelCase ( __lowercase , unittest.TestCase ):
'''simple docstring'''
A_ = YolosImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self ) -> Dict:
"""simple docstring"""
_lowerCamelCase = YolosImageProcessingTester(self )
@property
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , '''image_mean''' ) )
self.assertTrue(hasattr(A_ , '''image_std''' ) )
self.assertTrue(hasattr(A_ , '''do_normalize''' ) )
self.assertTrue(hasattr(A_ , '''do_resize''' ) )
self.assertTrue(hasattr(A_ , '''size''' ) )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , A_ )
_lowerCamelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
_lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_lowerCamelCase = image_processing(A_ , return_tensors='''pt''' ).pixel_values
_lowerCamelCase , _lowerCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self ) -> List[str]:
"""simple docstring"""
# Initialize image_processings
_lowerCamelCase = self.image_processing_class(**self.image_processor_dict )
_lowerCamelCase = self.image_processing_class(do_resize=A_ , do_normalize=A_ , do_rescale=A_ )
# create random PyTorch tensors
_lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
_lowerCamelCase = image_processing_a.pad(A_ , return_tensors='''pt''' )
_lowerCamelCase = image_processing_a(A_ , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self ) -> Optional[Any]:
"""simple docstring"""
# prepare image and target
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_lowerCamelCase = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
@slow
def UpperCamelCase_ ( self ) -> Tuple:
"""simple docstring"""
# prepare image, target and masks_path
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_lowerCamelCase = json.loads(f.read() )
_lowerCamelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_lowerCamelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_lowerCamelCase = YolosImageProcessor(format='''coco_panoptic''' )
_lowerCamelCase = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='''pt''' )
# verify pixel values
_lowerCamelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
_lowerCamelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A_ ) )
# verify boxes
_lowerCamelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A_ )
_lowerCamelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A_ , atol=1E-3 ) )
# verify image_id
_lowerCamelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A_ ) )
# verify is_crowd
_lowerCamelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A_ ) )
# verify class_labels
_lowerCamelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A_ ) )
# verify masks
_lowerCamelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A_ )
# verify orig_size
_lowerCamelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A_ ) )
# verify size
_lowerCamelCase = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A_ ) )
| 638 | 1 |
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
# Pad each sequence in a batch to `sequence_length`; a tuple padding value
# signals 2-wide span entries. The obfuscated assignment targets below stand
# for slices of the padded output tensor in the original code.
def snake_case__ ( UpperCamelCase ,UpperCamelCase ,UpperCamelCase ,UpperCamelCase ) -> list:
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
_UpperCamelCase : Optional[int] = np.full((len(__UpperCamelCase ), sequence_length, 2) ,__UpperCamelCase )
else:
_UpperCamelCase : str = np.full((len(__UpperCamelCase ), sequence_length) ,__UpperCamelCase )
for i, tensor in enumerate(__UpperCamelCase ):
if padding_side == "right":
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
_UpperCamelCase : int = tensor[:sequence_length]
else:
_UpperCamelCase : Dict = tensor[:sequence_length]
else:
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
_UpperCamelCase : Union[str, Any] = tensor[:sequence_length]
else:
_UpperCamelCase : str = tensor[:sequence_length]
return out_tensor.tolist()
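# Worked sketch for the padding helper above: padding [[1, 2], [3]] to
# sequence_length=3 on the "right" with padding_value=-1 yields
# [[1, 2, -1], [3, -1, -1]]; "left" padding would yield [[-1, 1, 2], [-1, -1, 3]].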
def snake_case__ ( UpperCamelCase ) -> bool:
    _UpperCamelCase : Optional[int] = ord(__UpperCamelCase )
    # ASCII punctuation blocks: !-/ (33-47), :-@ (58-64), [-` (91-96), {-~ (123-126)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    _UpperCamelCase : Optional[Any] = unicodedata.category(__UpperCamelCase )
    # Any Unicode general category starting with "P" also counts as punctuation
    if cat.startswith('''P''' ):
        return True
    return False
@dataclass
class UpperCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
A__ : int = 42
A__ : List[str] = True
A__ : str = None
A__ : Union[str, Any] = None
A__ : Tuple = -100
A__ : List[Any] = 'pt'
def _lowercase ( self , _snake_case ) -> Optional[int]:
import torch
_UpperCamelCase : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
_UpperCamelCase : int = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_UpperCamelCase : List[Any] = self.tokenizer.pad(
A_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
if labels is None:
return batch
_UpperCamelCase : Optional[int] = torch.tensor(batch['''entity_ids'''] ).shape[1]
_UpperCamelCase : List[Any] = self.tokenizer.padding_side
if padding_side == "right":
_UpperCamelCase : int = [
list(A_ ) + [self.label_pad_token_id] * (sequence_length - len(A_ )) for label in labels
]
else:
_UpperCamelCase : Any = [
[self.label_pad_token_id] * (sequence_length - len(A_ )) + list(A_ ) for label in labels
]
_UpperCamelCase : List[Any] = [feature['''ner_tags'''] for feature in features]
_UpperCamelCase : Any = padding_tensor(A_ , -1 , A_ , A_ )
_UpperCamelCase : Any = [feature['''original_entity_spans'''] for feature in features]
_UpperCamelCase : int = padding_tensor(A_ , (-1, -1) , A_ , A_ )
_UpperCamelCase : Optional[Any] = {k: torch.tensor(A_ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
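# Hedged usage sketch (assumes the original, un-obfuscated collator name from
# the LUKE token-classification example; this obfuscated file does not name it):
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer,
#                                                     padding="longest")
#   batch = collator(features)  # pads labels, ner_tags and entity spans together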
| 683 |
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A__ : Union[str, Any] = 16
A__ : int = 32
def UpperCamelCase( __UpperCamelCase : Tuple ):
    # Convert a raw byte count into whole mebibytes (2**20 bytes)
    return int(x / 2**20 )
class __snake_case :
def __enter__( self : str):
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
lowerCAmelCase_ : List[str] = torch.cuda.memory_allocated()
return self
def __exit__( self : Any , *A_ : Dict):
gc.collect()
torch.cuda.empty_cache()
lowerCAmelCase_ : str = torch.cuda.memory_allocated()
lowerCAmelCase_ : Optional[int] = torch.cuda.max_memory_allocated()
lowerCAmelCase_ : List[str] = bamb(self.end - self.begin)
lowerCAmelCase_ : Optional[int] = bamb(self.peak - self.begin)
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def UpperCamelCase( __UpperCamelCase : Accelerator ,__UpperCamelCase : int = 16 ,__UpperCamelCase : str = "bert-base-cased" ,__UpperCamelCase : int = 320 ,__UpperCamelCase : int = 160 ,):
lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__UpperCamelCase )
lowerCAmelCase_ : Any = load_dataset(
'''glue''' ,'''mrpc''' ,split={'''train''': f"""train[:{n_train}]""", '''validation''': f"""validation[:{n_val}]"""} )
def tokenize_function(__UpperCamelCase : Any ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''] ,examples['''sentence2'''] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase_ : Union[str, Any] = datasets.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=['''idx''', '''sentence1''', '''sentence2'''] ,load_from_cache_file=__UpperCamelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase_ : List[str] = tokenized_datasets.rename_column('''label''' ,'''labels''' )
def collate_fn(__UpperCamelCase : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCamelCase ,padding='''max_length''' ,max_length=128 ,return_tensors='''pt''' )
return tokenizer.pad(__UpperCamelCase ,padding='''longest''' ,return_tensors='''pt''' )
# Instantiate dataloaders.
lowerCAmelCase_ : Union[str, Any] = DataLoader(
tokenized_datasets['''train'''] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
lowerCAmelCase_ : str = DataLoader(
tokenized_datasets['''validation'''] ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=__UpperCamelCase )
return train_dataloader, eval_dataloader
def UpperCamelCase( __UpperCamelCase : Any ,__UpperCamelCase : Tuple ):
# Initialize accelerator
lowerCAmelCase_ : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase_ : Any = config['''lr''']
lowerCAmelCase_ : Any = int(config['''num_epochs'''] )
lowerCAmelCase_ : Any = int(config['''seed'''] )
lowerCAmelCase_ : Dict = int(config['''batch_size'''] )
lowerCAmelCase_ : Dict = args.model_name_or_path
set_seed(__UpperCamelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = get_dataloaders(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,args.n_train ,args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase_ : Any = AutoModelForSequenceClassification.from_pretrained(__UpperCamelCase ,return_dict=__UpperCamelCase )
# Instantiate optimizer
lowerCAmelCase_ : Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase_ : List[str] = optimizer_cls(params=model.parameters() ,lr=__UpperCamelCase )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase_ : str = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
lowerCAmelCase_ : Tuple = 1
lowerCAmelCase_ : str = (len(__UpperCamelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase_ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=__UpperCamelCase ,num_warmup_steps=0 ,num_training_steps=__UpperCamelCase ,)
else:
lowerCAmelCase_ : List[Any] = DummyScheduler(__UpperCamelCase ,total_num_steps=__UpperCamelCase ,warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = accelerator.prepare(
__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase_ : str = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase_ : List[Any] = 0
# Now we train the model
lowerCAmelCase_ : Union[str, Any] = {}
for epoch in range(__UpperCamelCase ,__UpperCamelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__UpperCamelCase ):
lowerCAmelCase_ : Union[str, Any] = model(**__UpperCamelCase )
lowerCAmelCase_ : Any = outputs.loss
lowerCAmelCase_ : List[str] = loss / gradient_accumulation_steps
accelerator.backward(__UpperCamelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin ) ) )
accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used ) )
accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked ) )
accelerator.print(
'''Total Peak Memory consumed during the train (max): {}'''.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCAmelCase_ : Tuple = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir ,'''peak_memory_utilization.json''' ) ,'''w''' ) as f:
json.dump(__UpperCamelCase ,__UpperCamelCase )
def UpperCamelCase( ):
lowerCAmelCase_ : str = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' ,type=__UpperCamelCase ,default='''bert-base-cased''' ,help='''Path to pretrained model or model identifier from huggingface.co/models.''' ,required=__UpperCamelCase ,)
parser.add_argument(
'''--output_dir''' ,type=__UpperCamelCase ,default='''.''' ,help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' ,)
parser.add_argument(
'''--peak_memory_upper_bound''' ,type=__UpperCamelCase ,default=__UpperCamelCase ,help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''' ,)
parser.add_argument(
'''--n_train''' ,type=__UpperCamelCase ,default=320 ,help='''Number of training examples to use.''' ,)
parser.add_argument(
'''--n_val''' ,type=__UpperCamelCase ,default=160 ,help='''Number of validation examples to use.''' ,)
parser.add_argument(
'''--num_epochs''' ,type=__UpperCamelCase ,default=1 ,help='''Number of train epochs.''' ,)
lowerCAmelCase_ : Dict = parser.parse_args()
lowerCAmelCase_ : int = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__UpperCamelCase ,__UpperCamelCase )
if __name__ == "__main__":
main()
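# Example invocation (flags match the argparse definitions above; the script
# name and paths are illustrative):
#   python peak_memory_tracking.py --model_name_or_path bert-base-cased \
#       --num_epochs 1 --n_train 320 --n_val 160 --output_dir ./results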
| 171 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class A__ ( lowercase__ ):
"""simple docstring"""
_lowercase = 4_2
class A__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
@register_to_config
def __init__( self : Union[str, Any] , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 3 , lowerCamelCase__ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCamelCase__ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCamelCase__ : Tuple[int] = (64,) , lowerCamelCase__ : int = 1 , lowerCamelCase__ : str = "silu" , lowerCamelCase__ : int = 3 , lowerCamelCase__ : int = 32 , lowerCamelCase__ : int = 256 , lowerCamelCase__ : int = 32 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : float = 0.1_8215 , lowerCamelCase__ : str = "group" , ):
super().__init__()
# pass init params to Encoder
a__ : str = Encoder(
in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , down_block_types=UpperCAmelCase__ , block_out_channels=UpperCAmelCase__ , layers_per_block=UpperCAmelCase__ , act_fn=UpperCAmelCase__ , norm_num_groups=UpperCAmelCase__ , double_z=UpperCAmelCase__ , )
a__ : List[str] = vq_embed_dim if vq_embed_dim is not None else latent_channels
a__ : Any = nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , 1 )
a__ : List[str] = VectorQuantizer(UpperCAmelCase__ , UpperCAmelCase__ , beta=0.25 , remap=UpperCAmelCase__ , sane_index_shape=UpperCAmelCase__ )
a__ : Optional[int] = nn.Convad(UpperCAmelCase__ , UpperCAmelCase__ , 1 )
# pass init params to Decoder
a__ : str = Decoder(
in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , up_block_types=UpperCAmelCase__ , block_out_channels=UpperCAmelCase__ , layers_per_block=UpperCAmelCase__ , act_fn=UpperCAmelCase__ , norm_num_groups=UpperCAmelCase__ , norm_type=UpperCAmelCase__ , )
@apply_forward_hook
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : bool = True ):
a__ : Any = self.encoder(UpperCAmelCase__ )
a__ : Tuple = self.quant_conv(UpperCAmelCase__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=UpperCAmelCase__ )
@apply_forward_hook
def _UpperCamelCase( self : Tuple , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True ):
# also go through quantization layer
if not force_not_quantize:
a__ : Union[str, Any] = self.quantize(UpperCAmelCase__ )
else:
a__ : List[str] = h
a__ : Union[str, Any] = self.post_quant_conv(UpperCAmelCase__ )
a__ : int = self.decoder(UpperCAmelCase__ , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCAmelCase__ )
def _UpperCamelCase( self : Dict , lowerCamelCase__ : torch.FloatTensor , lowerCamelCase__ : bool = True ):
a__ : List[str] = sample
a__ : Optional[Any] = self.encode(UpperCAmelCase__ ).latents
a__ : Tuple = self.decode(UpperCAmelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCAmelCase__ )
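# Minimal usage sketch (assumes the original, un-obfuscated VQModel API that
# this class mirrors; the obfuscated names make this file itself non-runnable):
#   model = VQModel(in_channels=3, out_channels=3)
#   recon = model(torch.randn(1, 3, 32, 32)).sample  # encode -> quantize -> decode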
| 700 |
from copy import deepcopy
class A__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase__ : list[int] | None = None , lowerCamelCase__ : int | None = None ):
if arr is None and size is not None:
a__ : Union[str, Any] = size
a__ : Optional[Any] = [0] * size
elif arr is not None:
self.init(lowerCamelCase__ )
else:
raise ValueError("Either arr or size must be specified" )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : list[int] ):
a__ : Any = len(lowerCamelCase__ )
a__ : List[Any] = deepcopy(lowerCamelCase__ )
for i in range(1 , self.size ):
a__ : Union[str, Any] = self.next_(lowerCamelCase__ )
if j < self.size:
self.tree[j] += self.tree[i]
def _UpperCamelCase( self : Tuple ):
a__ : List[str] = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
a__ : Optional[Any] = self.next_(lowerCamelCase__ )
if j < self.size:
arr[j] -= arr[i]
return arr
    @staticmethod
    def _UpperCamelCase( lowerCamelCase__ : int ):
        # step to the next node in the update chain: add the lowest set bit
        return index + (index & (-index))
    @staticmethod
    def _UpperCamelCase( lowerCamelCase__ : int ):
        # step to the parent prefix node: strip the lowest set bit
        return index - (index & (-index))
def _UpperCamelCase( self : str , lowerCamelCase__ : int , lowerCamelCase__ : int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
a__ : Optional[int] = self.next_(lowerCamelCase__ )
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : int ):
self.add(lowerCamelCase__ , value - self.get(lowerCamelCase__ ) )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int ):
if right == 0:
return 0
a__ : Tuple = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
a__ : List[Any] = self.prev(lowerCamelCase__ )
return result
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ):
return self.prefix(lowerCamelCase__ ) - self.prefix(lowerCamelCase__ )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int ):
return self.query(lowerCamelCase__ , index + 1 )
def _UpperCamelCase( self : int , lowerCamelCase__ : int ):
value -= self.tree[0]
if value < 0:
return -1
a__ : Union[str, Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
a__ : Tuple = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
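# Worked example (method names assume the original Fenwick-tree API that the
# obfuscated `_UpperCamelCase` methods stand for):
#   tree = BinaryIndexedTree(arr=[1, 2, 3])
#   tree.prefix(2)   # 1 + 2 = 3
#   tree.add(1, 10)  # logical array becomes [1, 12, 3]
#   tree.query(0, 3) # 16
# All updates and prefix queries run in O(log n) by walking lowest-set-bit chains.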
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 689 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : torch.FloatTensor
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("DownEncoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE=True,):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = torch.nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[0],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
# down
__lowerCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_down_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,add_downsample=not is_final_block,resnet_eps=1e-6,downsample_padding=0,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
self.down_blocks.append(__SCREAMING_SNAKE_CASE )
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""",attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# out
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[-1],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = 2 * out_channels if double_z else out_channels
__lowerCAmelCase = nn.Convad(block_out_channels[-1],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = x
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(""">=""","""1.11.0""" ):
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
__lowerCAmelCase = down_block(__SCREAMING_SNAKE_CASE )
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE )
# post-process
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
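# Shape sketch (derived from the defaults above): with a single
# "DownEncoderBlock2D", block_out_channels=(64,) and double_z=True, the only
# block is also the final one, so no downsampling occurs and an input of shape
# (B, 3, H, W) maps to (B, 2 * out_channels, H, W).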
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=3,__SCREAMING_SNAKE_CASE=("UpDecoderBlock2D",),__SCREAMING_SNAKE_CASE=(64,),__SCREAMING_SNAKE_CASE=2,__SCREAMING_SNAKE_CASE=32,__SCREAMING_SNAKE_CASE="silu",__SCREAMING_SNAKE_CASE="group",):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = layers_per_block
__lowerCAmelCase = nn.Convad(
__SCREAMING_SNAKE_CASE,block_out_channels[-1],kernel_size=3,stride=1,padding=1,)
__lowerCAmelCase = None
__lowerCAmelCase = nn.ModuleList([] )
__lowerCAmelCase = in_channels if norm_type == """spatial""" else None
# mid
__lowerCAmelCase = UNetMidBlockaD(
in_channels=block_out_channels[-1],resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,output_scale_factor=1,resnet_time_scale_shift="""default""" if norm_type == """group""" else norm_type,attention_head_dim=block_out_channels[-1],resnet_groups=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,)
# up
__lowerCAmelCase = list(reversed(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = output_channel
__lowerCAmelCase = reversed_block_out_channels[i]
__lowerCAmelCase = i == len(__SCREAMING_SNAKE_CASE ) - 1
__lowerCAmelCase = get_up_block(
__SCREAMING_SNAKE_CASE,num_layers=self.layers_per_block + 1,in_channels=__SCREAMING_SNAKE_CASE,out_channels=__SCREAMING_SNAKE_CASE,prev_output_channel=__SCREAMING_SNAKE_CASE,add_upsample=not is_final_block,resnet_eps=1e-6,resnet_act_fn=__SCREAMING_SNAKE_CASE,resnet_groups=__SCREAMING_SNAKE_CASE,attention_head_dim=__SCREAMING_SNAKE_CASE,temb_channels=__SCREAMING_SNAKE_CASE,resnet_time_scale_shift=__SCREAMING_SNAKE_CASE,)
self.up_blocks.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = output_channel
# out
if norm_type == "spatial":
__lowerCAmelCase = SpatialNorm(block_out_channels[0],__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = nn.GroupNorm(num_channels=block_out_channels[0],num_groups=__SCREAMING_SNAKE_CASE,eps=1e-6 )
__lowerCAmelCase = nn.SiLU()
__lowerCAmelCase = nn.Conv2d(block_out_channels[0],__SCREAMING_SNAKE_CASE,3,padding=1 )
__lowerCAmelCase = False
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
__lowerCAmelCase = z
__lowerCAmelCase = self.conv_in(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(__SCREAMING_SNAKE_CASE ):
def custom_forward(*__SCREAMING_SNAKE_CASE ):
return module(*__SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(""">=""","""1.11.0""" ):
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,use_reentrant=__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = torch.utils.checkpoint.checkpoint(create_custom_forward(__SCREAMING_SNAKE_CASE ),__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
else:
# middle
__lowerCAmelCase = self.mid_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = sample.to(__SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
__lowerCAmelCase = up_block(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = self.conv_norm_out(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_act(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.conv_out(__SCREAMING_SNAKE_CASE )
return sample
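# The decoder above swaps its output GroupNorm for a SpatialNorm when
# norm_type == "spatial", so the post-norm can be modulated by the quantized
# latents passed in as `latent_embeds`. A hedged sketch of that conditioning,
# following the MoVQ-style formulation (class name is illustrative):
import torch.nn as nn
import torch.nn.functional as F

class SpatialNormSketch(nn.Module):
    def __init__(self, f_channels: int, zq_channels: int):
        super().__init__()
        self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=32, eps=1e-6, affine=True)
        self.conv_y = nn.Conv2d(zq_channels, f_channels, kernel_size=1)
        self.conv_b = nn.Conv2d(zq_channels, f_channels, kernel_size=1)

    def forward(self, f, zq):
        # Resize the latent map to the feature map's spatial size, then use it
        # to produce per-pixel scale and shift for the normalized features.
        zq = F.interpolate(zq, size=f.shape[-2:], mode="nearest")
        return self.norm_layer(f) * self.conv_y(zq) + self.conv_b(zq)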
class _UpperCAmelCase ( nn.Module ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE="random",__SCREAMING_SNAKE_CASE=False,__SCREAMING_SNAKE_CASE=True ):
'''simple docstring'''
super().__init__()
__lowerCAmelCase = n_e
__lowerCAmelCase = vq_embed_dim
__lowerCAmelCase = beta
__lowerCAmelCase = legacy
__lowerCAmelCase = nn.Embedding(self.n_e,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e,1.0 / self.n_e )
__lowerCAmelCase = remap
if self.remap is not None:
self.register_buffer("""used""",torch.tensor(np.load(self.remap ) ) )
__lowerCAmelCase = self.used.shape[0]
__lowerCAmelCase = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCAmelCase = self.re_embed
__lowerCAmelCase = self.re_embed + 1
print(
f'Remapping {self.n_e} indices to {self.re_embed} indices. '
f'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCAmelCase = n_e
__lowerCAmelCase = sane_index_shape
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCAmelCase = match.argmax(-1 )
__lowerCAmelCase = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCAmelCase = torch.randint(0,self.re_embed,size=new[unknown].shape ).to(device=new.device )
else:
__lowerCAmelCase = self.unknown_index
return new.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = inds.shape
assert len(__SCREAMING_SNAKE_CASE ) > 1
__lowerCAmelCase = inds.reshape(ishape[0],-1 )
__lowerCAmelCase = self.used.to(__SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCAmelCase = 0 # simply set to zero
__lowerCAmelCase = torch.gather(used[None, :][inds.shape[0] * [0], :],1,__SCREAMING_SNAKE_CASE )
return back.reshape(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = z.permute(0,2,3,1 ).contiguous()
__lowerCAmelCase = z.view(-1,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
__lowerCAmelCase = torch.argmin(torch.cdist(__SCREAMING_SNAKE_CASE,self.embedding.weight ),dim=1 )
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE ).view(z.shape )
__lowerCAmelCase = None
__lowerCAmelCase = None
# compute loss for embedding
if not self.legacy:
__lowerCAmelCase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
__lowerCAmelCase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
__lowerCAmelCase = z + (z_q - z).detach()
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
if self.remap is not None:
__lowerCAmelCase = min_encoding_indices.reshape(z.shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.remap_to_used(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = min_encoding_indices.reshape(-1,1 ) # flatten
if self.sane_index_shape:
__lowerCAmelCase = min_encoding_indices.reshape(z_q.shape[0],z_q.shape[2],z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if self.remap is not None:
__lowerCAmelCase = indices.reshape(shape[0],-1 ) # add batch axis
__lowerCAmelCase = self.unmap_to_all(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
__lowerCAmelCase = self.embedding(__SCREAMING_SNAKE_CASE )
if shape is not None:
__lowerCAmelCase = z_q.view(__SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
__lowerCAmelCase = z_q.permute(0,3,1,2 ).contiguous()
return z_q
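# A standalone sketch of the nearest-codebook-entry quantization with the
# straight-through gradient estimator implemented above; `vector_quantize` is
# a hypothetical name used only for this illustration.
import torch

def vector_quantize(z, codebook, beta=0.25):
    # z: (N, D) flattened latents; codebook: (K, D) embedding table.
    indices = torch.argmin(torch.cdist(z, codebook), dim=1)
    z_q = codebook[indices]
    # Commitment loss: pull encoder outputs and their codes toward each other.
    loss = torch.mean((z_q.detach() - z) ** 2) + beta * torch.mean((z_q - z.detach()) ** 2)
    # Straight-through estimator: forward uses z_q, gradients flow back to z.
    z_q = z + (z_q - z).detach()
    return z_q, indices, loss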
class _UpperCAmelCase ( lowerCAmelCase_ ):
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
__lowerCAmelCase = parameters
__lowerCAmelCase , __lowerCAmelCase = torch.chunk(__SCREAMING_SNAKE_CASE,2,dim=1 )
__lowerCAmelCase = torch.clamp(self.logvar,-30.0,20.0 )
__lowerCAmelCase = deterministic
__lowerCAmelCase = torch.exp(0.5 * self.logvar )
__lowerCAmelCase = torch.exp(self.logvar )
if self.deterministic:
__lowerCAmelCase = __lowerCAmelCase = torch.zeros_like(
self.mean,device=self.parameters.device,dtype=self.parameters.dtype )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = randn_tensor(
self.mean.shape,generator=__SCREAMING_SNAKE_CASE,device=self.parameters.device,dtype=self.parameters.dtype )
__lowerCAmelCase = self.mean + self.std * sample
return x
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean,2 ) + self.var - 1.0 - self.logvar,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar,dim=[1, 2, 3],)
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=[1, 2, 3] ):
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
__lowerCAmelCase = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean,2 ) / self.var,dim=__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.mean
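# The closed-form KL term used above, KL(N(mu, sigma^2) || N(0, I)), written
# as a standalone helper (`gaussian_kl` is a hypothetical name):
import torch

def gaussian_kl(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    # 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2) over the non-batch dims.
    return 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=[1, 2, 3])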
| 689 | 1 |
def binary_xor(a: int, b: int) -> str:
    """Return the XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
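    # Worked example: 25 = 0b11001 and 32 = 0b100000; XOR of the zero-filled
    # bit strings is 111001, so binary_xor(25, 32) == '0b111001' (57 == 25 ^ 32).
    assert binary_xor(25, 32) == "0b111001"
    assert int(binary_xor(25, 32), 2) == 25 ^ 32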
| 707 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
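# The availability guards above keep heavy optional dependencies lazy. A
# minimal self-contained version of that pattern (the real helper in ..utils
# is more elaborate, with version checks and caching):
import importlib.util

def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None

if is_torch_available():
    import torch  # imported only when the optional dependency is installed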
| 515 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 305 | INSTALL_CONTENT : str ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowercase : List[str] =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowercase : int ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 305 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ):
@register_to_config
def __init__( self : Any , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : float , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : bool = False , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
a = nn.Embedding(__UpperCamelCase , __UpperCamelCase )
a = nn.Embedding(__UpperCamelCase , __UpperCamelCase )
a = False
a = nn.Dropout(p=__UpperCamelCase )
a = T5Config(
vocab_size=__UpperCamelCase , d_model=__UpperCamelCase , num_heads=__UpperCamelCase , d_kv=__UpperCamelCase , d_ff=__UpperCamelCase , dropout_rate=__UpperCamelCase , feed_forward_proj=__UpperCamelCase , is_decoder=__UpperCamelCase , is_encoder_decoder=__UpperCamelCase , )
a = nn.ModuleList()
for lyr_num in range(__UpperCamelCase ):
a = T5Block(__UpperCamelCase )
self.encoders.append(__UpperCamelCase )
a = T5LayerNorm(__UpperCamelCase )
a = nn.Dropout(p=__UpperCamelCase )
def A ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
a = self.token_embedder(__UpperCamelCase )
a = encoder_input_tokens.shape[1]
a = torch.arange(__UpperCamelCase , device=encoder_input_tokens.device )
x += self.position_encoding(__UpperCamelCase )
a = self.dropout_pre(__UpperCamelCase )
# inverted the attention mask
a = encoder_input_tokens.size()
a = self.get_extended_attention_mask(__UpperCamelCase , __UpperCamelCase )
for lyr in self.encoders:
a = lyr(__UpperCamelCase , __UpperCamelCase )[0]
a = self.layer_norm(__UpperCamelCase )
return self.dropout_post(__UpperCamelCase ), encoder_inputs_mask
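# A sketch of what `get_extended_attention_mask` (from ModuleUtilsMixin) does
# with the (batch, seq) mask above: broadcast it to (batch, 1, 1, seq) and
# convert it to an additive mask that is 0 where attention is allowed and a
# large negative value where it is not.
import torch

def extend_attention_mask(mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    extended = mask[:, None, None, :].to(dtype)
    return (1.0 - extended) * torch.finfo(dtype).min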
| 706 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : int = logging.get_logger(__name__)
A_ : str = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class _lowercase ( UpperCAmelCase__, UpperCAmelCase__ ):
_UpperCAmelCase = '''focalnet'''
def __init__( self : int , __lowerCAmelCase : Optional[Any]=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Any=3 , __lowerCAmelCase : Tuple=96 , __lowerCAmelCase : Dict=False , __lowerCAmelCase : Optional[int]=[192, 384, 768, 768] , __lowerCAmelCase : Union[str, Any]=[2, 2, 6, 2] , __lowerCAmelCase : Optional[int]=[2, 2, 2, 2] , __lowerCAmelCase : Union[str, Any]=[3, 3, 3, 3] , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Any=4.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Tuple=0.1 , __lowerCAmelCase : str=False , __lowerCAmelCase : Optional[int]=1E-4 , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Union[str, Any]=False , __lowerCAmelCase : str=False , __lowerCAmelCase : Any=0.0_2 , __lowerCAmelCase : str=1E-5 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Dict=None , __lowerCAmelCase : str=None , **__lowerCAmelCase : Any , ) -> List[str]:
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
a = image_size
a = patch_size
a = num_channels
a = embed_dim
a = use_conv_embed
a = hidden_sizes
a = depths
a = focal_levels
a = focal_windows
a = hidden_act
a = mlp_ratio
a = hidden_dropout_prob
a = drop_path_rate
a = use_layerscale
a = layerscale_value
a = use_post_layernorm
a = use_post_layernorm_in_modulation
a = normalize_modulator
a = initializer_range
a = layer_norm_eps
a = encoder_stride
a = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
a , a = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
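# A hedged sketch of what `get_aligned_output_features_output_indices` returns
# for the stage names built above (assumptions: missing arguments default to
# the last stage, and the feature names and indices are kept mutually
# consistent; the real helper also validates its inputs):
def align_outputs(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        out_features = [stage_names[-1]]
    if out_features is None:
        out_features = [stage_names[i] for i in out_indices]
    if out_indices is None:
        out_indices = [stage_names.index(name) for name in out_features]
    return out_features, out_indices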
| 32 | 0 |
def a_ ( word : str ) -> str:
    """Convert the ASCII lowercase letters in ``word`` to uppercase."""
    return "".join(chr(ord(char) - 32) if 'a' <= char <= 'z' else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
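    # Quick check: ASCII lowercase letters sit exactly 32 code points above
    # their uppercase counterparts, so ord(char) - 32 upcases them while other
    # characters pass through unchanged.
    assert a_("hello 123!") == "HELLO 123!"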
| 246 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_lowerCAmelCase : List[Any] = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_lowerCAmelCase : int = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_lowerCAmelCase : int = '\nCalculates how good predictions are given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidate should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0).\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_lowerCAmelCase : Optional[int] = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_lowerCAmelCase : Dict = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def lowerCamelCase__ ( self : int , __snake_case : List[Any] , __snake_case : int , __snake_case : Any=[1, 10, 100] , __snake_case : str=4 , __snake_case : List[Any]=3.0 ) -> Union[str, Any]:
'''simple docstring'''
if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=__snake_case ) as executor:
lowerCamelCase = []
lowerCamelCase = Counter()
lowerCamelCase = 0
lowerCamelCase = defaultdict(__snake_case )
for task_id, (candidates, test_case) in enumerate(zip(__snake_case , __snake_case ) ):
for candidate in candidates:
lowerCamelCase = candidate + '\n' + test_case
lowerCamelCase = (test_program, timeout, task_id, completion_id[task_id])
lowerCamelCase = executor.submit(__snake_case , *__snake_case )
futures.append(__snake_case )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__snake_case ):
lowerCamelCase = future.result()
results[result["task_id"]].append((result['completion_id'], result) )
lowerCamelCase , lowerCamelCase = [], []
for result in results.values():
result.sort()
lowerCamelCase = [r[1]['passed'] for r in result]
total.append(len(__snake_case ) )
correct.append(sum(__snake_case ) )
lowerCamelCase = np.array(__snake_case )
lowerCamelCase = np.array(__snake_case )
lowerCamelCase = k
lowerCamelCase = {F'''pass@{k}''': estimate_pass_at_k(__snake_case , __snake_case , __snake_case ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def a_ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
def estimator(UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase = itertools.repeat(UpperCamelCase_ , len(UpperCamelCase_ ) )
else:
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
lowerCamelCase = iter(UpperCamelCase_ )
return np.array([estimator(int(UpperCamelCase_ ) , int(UpperCamelCase_ ) , UpperCamelCase_ ) for n, c in zip(UpperCamelCase_ , UpperCamelCase_ )] )
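# Numerical sanity check for the estimator above: the stable product form
# 1 - prod_{i=n-c+1..n}(1 - k/i) equals the closed form 1 - C(n-c, k)/C(n, k)
# from the Codex paper.
import numpy as np
from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

n, c, k = 10, 3, 2  # 10 samples, 3 correct, pass@2
assert abs(pass_at_k(n, c, k) - (1 - comb(n - c, k) / comb(n, k))) < 1e-12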
| 246 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class a__( snake_case__ ):
a_ : Union[str, Any] = '''unispeech-sat'''
def __init__( self , _UpperCAmelCase=32 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , _UpperCAmelCase="group" , _UpperCAmelCase="gelu" , _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , _UpperCAmelCase=False , _UpperCAmelCase=128 , _UpperCAmelCase=16 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=0.05 , _UpperCAmelCase=10 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0 , _UpperCAmelCase=10 , _UpperCAmelCase=0 , _UpperCAmelCase=320 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=100 , _UpperCAmelCase=256 , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase="mean" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=256 , _UpperCAmelCase=(512, 512, 512, 512, 1500) , _UpperCAmelCase=(5, 3, 3, 1, 1) , _UpperCAmelCase=(1, 2, 3, 1, 1) , _UpperCAmelCase=512 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=504 , **_UpperCAmelCase , ) -> List[str]:
super().__init__(**_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase )
snake_case__ =hidden_size
snake_case__ =feat_extract_norm
snake_case__ =feat_extract_activation
snake_case__ =list(_UpperCAmelCase )
snake_case__ =list(_UpperCAmelCase )
snake_case__ =list(_UpperCAmelCase )
snake_case__ =conv_bias
snake_case__ =num_conv_pos_embeddings
snake_case__ =num_conv_pos_embedding_groups
snake_case__ =len(self.conv_dim )
snake_case__ =num_hidden_layers
snake_case__ =intermediate_size
snake_case__ =hidden_act
snake_case__ =num_attention_heads
snake_case__ =hidden_dropout
snake_case__ =attention_dropout
snake_case__ =activation_dropout
snake_case__ =feat_proj_dropout
snake_case__ =final_dropout
snake_case__ =layerdrop
snake_case__ =layer_norm_eps
snake_case__ =initializer_range
snake_case__ =vocab_size
snake_case__ =num_clusters
snake_case__ =do_stable_layer_norm
snake_case__ =use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case__ =apply_spec_augment
snake_case__ =mask_time_prob
snake_case__ =mask_time_length
snake_case__ =mask_time_min_masks
snake_case__ =mask_feature_prob
snake_case__ =mask_feature_length
snake_case__ =mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case__ =num_codevectors_per_group
snake_case__ =num_codevector_groups
snake_case__ =contrastive_logits_temperature
snake_case__ =feat_quantizer_dropout
snake_case__ =num_negatives
snake_case__ =codevector_dim
snake_case__ =proj_codevector_dim
snake_case__ =diversity_loss_weight
# ctc loss
snake_case__ =ctc_loss_reduction
snake_case__ =ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
snake_case__ =classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
snake_case__ =list(_UpperCAmelCase )
snake_case__ =list(_UpperCAmelCase )
snake_case__ =list(_UpperCAmelCase )
snake_case__ =xvector_output_dim
@property
def _lowercase ( self ) -> List[Any]:
return functools.reduce(operator.mul , self.conv_stride , 1 )
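# The property above multiplies the feature-extractor conv strides to get the
# overall downsampling factor; with the default strides that is one output
# frame per 320 input samples:
from functools import reduce
import operator

assert reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320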
| 702 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE__ : List[Any] = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__( datasets.Metric ):
def _lowercase ( self ) -> Optional[int]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , ) -> Any:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
snake_case__ =np.array([re.sub(_UpperCAmelCase , '' , _UpperCAmelCase ) for x in predictions] )
snake_case__ =np.array([re.sub(_UpperCAmelCase , '' , _UpperCAmelCase ) for x in references] )
else:
snake_case__ =np.asarray(_UpperCAmelCase )
snake_case__ =np.asarray(_UpperCAmelCase )
if ignore_case:
snake_case__ =np.char.lower(_UpperCAmelCase )
snake_case__ =np.char.lower(_UpperCAmelCase )
if ignore_punctuation:
snake_case__ =string.punctuation.maketrans('' , '' , string.punctuation )
snake_case__ =np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase )
snake_case__ =np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase )
if ignore_numbers:
snake_case__ =string.digits.maketrans('' , '' , string.digits )
snake_case__ =np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase )
snake_case__ =np.char.translate(_UpperCAmelCase , table=_UpperCAmelCase )
snake_case__ =predictions == references
return {"exact_match": np.mean(_UpperCAmelCase ) * 100}
| 581 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCAmelCase__ = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class __snake_case ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : Path , __lowerCAmelCase : Union[str, None] = None , __lowerCAmelCase : Union[List[str], None] = None , __lowerCAmelCase : Union[str, List[str], None] = None , __lowerCAmelCase : bool = True , ):
"""simple docstring"""
_lowerCamelCase : List[str] = [file for file in os.listdir(__lowerCAmelCase ) if os.path.isfile(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )]
if identifier is not None:
_lowerCamelCase : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
for n_ in n_identifier:
_lowerCamelCase : List[Any] = [file for file in files if n_ not in file]
else:
_lowerCamelCase : Tuple = [file for file in files if n_identifier not in file]
_lowerCamelCase : Tuple = ignore_files or []
ignore_files.append('''__init__.py''' )
_lowerCamelCase : str = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('''Testing''' , __lowerCAmelCase )
if only_modules:
_lowerCamelCase : Any = file.split('''.''' )[0]
try:
_lowerCamelCase : List[str] = getattr(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : Tuple = doctest.DocTestSuite(__lowerCAmelCase )
_lowerCamelCase : str = unittest.TextTestRunner().run(__lowerCAmelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
_lowerCamelCase : List[str] = doctest.testfile(str('''..''' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : List[str] = Path('''src/transformers''' )
_lowerCamelCase : List[str] = '''modeling'''
_lowerCamelCase : Optional[int] = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(__lowerCAmelCase , identifier=__lowerCAmelCase , ignore_files=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = Path('''src/transformers''' )
_lowerCamelCase : Any = '''tokenization'''
self.analyze_directory(__lowerCAmelCase , identifier=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Tuple = Path('''src/transformers''' )
_lowerCamelCase : Optional[Any] = '''configuration'''
self.analyze_directory(__lowerCAmelCase , identifier=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = Path('''src/transformers''' )
_lowerCamelCase : Union[str, Any] = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(__lowerCAmelCase , n_identifier=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = Path('''docs/source''' )
_lowerCamelCase : List[Any] = ['''favicon.ico''']
self.analyze_directory(__lowerCAmelCase , ignore_files=__lowerCAmelCase , only_modules=__lowerCAmelCase )
| 83 |
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
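    # Worked check: 5.0*10**2 + 9.3*10**3 + 7.0*10**4 = 79800.0. Horner's rule
    # computes the same value with one multiplication per coefficient:
    # ((((7.0)*x + 9.3)*x + 5.0)*x + 0.0)*x + 0.0
    assert abs(evaluate_poly(poly, x) - 79800.0) < 1e-6
    assert abs(horner(poly, x) - 79800.0) < 1e-6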
| 384 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : int , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=13 , __lowerCamelCase : List[str]=3 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : int=224 , __lowerCamelCase : List[str]=1000 , __lowerCamelCase : Optional[Any]=[3, 3, 6, 4] , __lowerCamelCase : List[Any]=[48, 56, 112, 220] , ):
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = num_labels
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = layer_depths
SCREAMING_SNAKE_CASE = embed_dims
def _snake_case ( self : List[Any] ):
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = None
if self.use_labels:
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Optional[Any] ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=A__ , layer_scale_init_value=1e-5 , )
def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = SwiftFormerModel(config=A__ )
model.to(A__ )
model.eval()
SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _snake_case ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : int ):
SCREAMING_SNAKE_CASE = self.num_labels
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(A__ )
model.to(A__ )
model.eval()
SCREAMING_SNAKE_CASE = model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification(A__ )
model.to(A__ )
model.eval()
SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Tuple ):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCamelCase__ = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = SwiftFormerModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(
self , config_class=A__ , has_text_modality=A__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _snake_case ( self : int ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _snake_case ( self : List[Any] ):
pass
def _snake_case ( self : int ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(A__ )
SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def _snake_case ( self : str ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(A__ )
SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
def _snake_case ( self : Dict ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def _snake_case ( self : str ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = SwiftFormerModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _snake_case ( self : str ):
pass
def _snake_case ( self : Optional[int] ):
def check_hidden_states_output(__lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A__ , A__ ) )
SCREAMING_SNAKE_CASE = outputs.hidden_states
SCREAMING_SNAKE_CASE = 8
self.assertEqual(len(A__ ) , A__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(A__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE = True
check_hidden_states_output(A__ , A__ , A__ )
def _snake_case ( self : List[Any] ):
def _config_zero_init(__lowerCamelCase : Tuple ):
SCREAMING_SNAKE_CASE = copy.deepcopy(A__ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(A__ , A__ , 1e-10 )
if isinstance(getattr(A__ , A__ , A__ ) , A__ ):
SCREAMING_SNAKE_CASE = _config_zero_init(getattr(A__ , A__ ) )
setattr(A__ , A__ , A__ )
return configs_no_init
SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE = _config_zero_init(A__ )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class(config=A__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _snake_case ( self : Optional[int] ):
pass
def __a ( ):
SCREAMING_SNAKE_CASE = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : str ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _snake_case ( self : Tuple ):
SCREAMING_SNAKE_CASE = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(A__ )
SCREAMING_SNAKE_CASE = self.default_image_processor
SCREAMING_SNAKE_CASE = prepare_img()
SCREAMING_SNAKE_CASE = image_processor(images=A__ , return_tensors="pt" ).to(A__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(**A__ )
# verify the logits
SCREAMING_SNAKE_CASE = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A__ )
SCREAMING_SNAKE_CASE = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) ) | 720 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ = "facebook/bart-large-mnli"
lowerCamelCase__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
lowerCamelCase__ = "text_classifier"
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSequenceClassification
lowerCamelCase__ = ["text", ["text"]]
lowerCamelCase__ = ["text"]
def _snake_case ( self : Optional[Any] ):
super().setup()
SCREAMING_SNAKE_CASE = self.model.config
SCREAMING_SNAKE_CASE = -1
        for idx, label in config.id2label.items():
if label.lower().startswith("entail" ):
SCREAMING_SNAKE_CASE = int(__lowerCamelCase )
if self.entailment_id == -1:
raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )
def _snake_case ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ):
SCREAMING_SNAKE_CASE = labels
return self.pre_processor(
[text] * len(__lowerCamelCase ) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )
def _snake_case ( self : str , __lowerCamelCase : Optional[int] ):
SCREAMING_SNAKE_CASE = outputs.logits
SCREAMING_SNAKE_CASE = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id] | 698 | 0 |
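# Usage sketch for the zero-shot NLI classifier above; the checkpoint name is
# the one hard-coded in the tool, the rest is illustrative (downloads weights).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")
text, labels = "I really enjoyed this film", ["positive", "negative"]
# One (premise, hypothesis) pair per candidate label, as in `encode` above.
batch = tokenizer([text] * len(labels), [f"This example is {label}" for label in labels],
                  return_tensors="pt", padding=True)
with torch.no_grad():
    logits = model(**batch).logits
# Index 2 is the entailment class for this checkpoint (cf. `setup` above).
print(labels[logits[:, 2].argmax().item()])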
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( __A , unittest.TestCase ):
UpperCamelCase : List[str] = GPTSanJapaneseTokenizer
UpperCamelCase : Any = False
UpperCamelCase : str = {"""do_clean_text""": False, """add_prefix_space""": False}
def __snake_case ( self ):
super().setUp()
# fmt: off
UpperCAmelCase__ : List[str] = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
UpperCAmelCase__ : Union[str, Any] = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
UpperCAmelCase__ : Any = {'unk_token': '<unk>'}
UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
UpperCAmelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(UpperCamelCase_ ) )
def __snake_case ( self , **UpperCamelCase_ ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
UpperCAmelCase__ : Optional[Any] = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : List[Any] = self.get_input_output_texts(UpperCamelCase_ )
UpperCAmelCase__ : Any = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
UpperCAmelCase__ : List[str] = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
return text, ids
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
pass # TODO add if relevant
def __snake_case ( self ):
UpperCAmelCase__ : Any = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ : List[str] = 'こんにちは、世界。 こんばんは、㔺界。'
UpperCAmelCase__ : Tuple = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
UpperCAmelCase__ : int = tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing conversion to ids without special tokens
UpperCAmelCase__ : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
UpperCAmelCase__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
# Testing conversion to ids with special tokens
UpperCAmelCase__ : List[Any] = tokens + [tokenizer.unk_token]
UpperCAmelCase__ : Optional[int] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
UpperCAmelCase__ : int = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : List[str] = self.get_tokenizer()
# Testing tokenization
UpperCAmelCase__ : Union[str, Any] = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
UpperCAmelCase__ : List[Any] = 'こんにちは、、、、世界。こんばんは、、、、世界。'
UpperCAmelCase__ : str = tokenizer.encode(UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def __snake_case ( self ):
UpperCAmelCase__ : Union[str, Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
UpperCAmelCase__ : List[str] = 'こんにちは、世界。'
UpperCAmelCase__ : Dict = 'こんばんは、㔺界。😀'
UpperCAmelCase__ : Optional[Any] = 'こんにちは、世界。こんばんは、世界。😀'
UpperCAmelCase__ : Optional[Any] = tokenizer.encode(prefix_text + input_text )
UpperCAmelCase__ : List[str] = tokenizer.encode('' , prefix_text=prefix_text + input_text )
UpperCAmelCase__ : int = tokenizer.encode(UpperCamelCase_ , prefix_text=UpperCamelCase_ )
UpperCAmelCase__ : Dict = tokenizer.decode(UpperCamelCase_ )
UpperCAmelCase__ : List[str] = tokenizer.decode(UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = tokenizer.decode(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def __snake_case ( self ):
UpperCAmelCase__ : List[str] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
UpperCAmelCase__ : Optional[Any] = 'こんにちは、世界。'
UpperCAmelCase__ : Optional[Any] = 'こんばんは、㔺界。😀'
UpperCAmelCase__ : Optional[Any] = len(tokenizer.encode(UpperCamelCase_ ) ) - 2
UpperCAmelCase__ : Optional[int] = len(tokenizer.encode(UpperCamelCase_ ) ) - 2
UpperCAmelCase__ : int = [1] + [0] * (len_prefix + len_text + 1)
UpperCAmelCase__ : List[str] = [1] * (len_prefix + len_text + 1) + [0]
UpperCAmelCase__ : str = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
UpperCAmelCase__ : Optional[int] = tokenizer(prefix_text + input_text ).token_type_ids
UpperCAmelCase__ : Tuple = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
UpperCAmelCase__ : List[Any] = tokenizer(UpperCamelCase_ , prefix_text=UpperCamelCase_ ).token_type_ids
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
@slow
def __snake_case ( self ):
UpperCAmelCase__ : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
UpperCAmelCase__ : List[str] = tokenizer.encode('あンいワ' )
UpperCAmelCase__ : int = tokenizer.encode('' , prefix_text='あンいワ' )
UpperCAmelCase__ : Dict = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , tokenizer.decode(UpperCamelCase_ ) )
self.assertEqual(tokenizer.decode(UpperCamelCase_ ) , tokenizer.decode(UpperCamelCase_ ) )
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def __snake_case ( self ):
UpperCAmelCase__ : Optional[Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
UpperCAmelCase__ : Optional[int] = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
UpperCAmelCase__ : str = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ )
UpperCAmelCase__ : Any = tokenizer.batch_encode_plus(UpperCamelCase_ , padding=UpperCamelCase_ )
# fmt: off
UpperCAmelCase__ : Optional[int] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
UpperCAmelCase__ : Union[str, Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
UpperCAmelCase__ : Dict = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , UpperCamelCase_ )
self.assertListEqual(x_token.token_type_ids , UpperCamelCase_ )
self.assertListEqual(x_token.attention_mask , UpperCamelCase_ )
self.assertListEqual(x_token_a.input_ids , UpperCamelCase_ )
self.assertListEqual(x_token_a.token_type_ids , UpperCamelCase_ )
self.assertListEqual(x_token_a.attention_mask , UpperCamelCase_ )
def __snake_case ( self ):
pass
def __snake_case ( self ):
pass
| 110 | from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_UpperCAmelCase = logging.get_logger(__name__)
# General docstring
_UpperCAmelCase = """RegNetConfig"""
# Base docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
_UpperCAmelCase = """facebook/regnet-y-040"""
_UpperCAmelCase = """tabby, tabby cat"""
_UpperCAmelCase = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ):
"""simple docstring"""
super().__init__(**lowercase )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
A_ : List[str] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
A_ : Tuple = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , )
A_ : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
A_ : Optional[int] = ACTaFN[activation] if activation is not None else tf.identity
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : int = self.convolution(self.padding(lowercase ) )
A_ : Optional[Any] = self.normalization(lowercase )
A_ : Union[str, Any] = self.activation(lowercase )
return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Dict = config.num_channels
A_ : Dict = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Union[str, Any] = shape_list(lowercase )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
A_ : Optional[Any] = tf.transpose(lowercase , perm=(0, 2, 3, 1) )
A_ : Dict = self.embedder(lowercase )
return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Optional[int] = tf.keras.layers.ConvaD(
filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' )
A_ : Tuple = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def lowerCAmelCase_ ( self , lowercase , lowercase = False ):
"""simple docstring"""
return self.normalization(self.convolution(lowercase ) , training=lowercase )
class TFRegNetSELayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
A_ : Any = [
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : str = self.pooler(lowercase )
for layer_module in self.attention:
A_ : List[str] = layer_module(lowercase )
A_ : str = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Union[str, Any] = in_channels != out_channels or stride != 1
A_ : Optional[Any] = max(1 , out_channels // config.groups_width )
A_ : List[Any] = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
A_ : Optional[Any] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ),
]
A_ : int = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Optional[Any] = hidden_state
for layer_module in self.layers:
A_ : Union[str, Any] = layer_module(lowercase )
A_ : int = self.shortcut(lowercase )
hidden_state += residual
A_ : Optional[int] = self.activation(lowercase )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Any = in_channels != out_channels or stride != 1
A_ : Union[str, Any] = max(1 , out_channels // config.groups_width )
A_ : str = (
TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
A_ : Optional[int] = [
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ),
]
A_ : str = ACTaFN[config.hidden_act]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
A_ : Any = hidden_state
for layer_module in self.layers:
A_ : Optional[Any] = layer_module(lowercase )
A_ : Optional[Any] = self.shortcut(lowercase )
hidden_state += residual
A_ : Tuple = self.activation(lowercase )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[Any] = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
A_ : int = [
# downsampling is done in the first layer with stride of 2
layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ),
*[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
for layer_module in self.layers:
A_ : List[str] = layer_module(lowercase )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
'''simple docstring'''
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : Any = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
A_ : int = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) )
def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ):
"""simple docstring"""
A_ : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Optional[Any] = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(lowercase )
if output_hidden_states:
A_ : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self , lowercase , **lowercase ):
"""simple docstring"""
super().__init__(**lowercase )
A_ : List[Any] = config
A_ : List[str] = TFRegNetEmbeddings(lowercase , name='embedder' )
A_ : Dict = TFRegNetEncoder(lowercase , name='encoder' )
A_ : Any = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' )
@unpack_inputs
def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ):
"""simple docstring"""
A_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Any = return_dict if return_dict is not None else self.config.use_return_dict
A_ : int = self.embedder(lowercase , training=lowercase )
A_ : int = self.encoder(
lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase )
A_ : List[Any] = encoder_outputs[0]
A_ : Any = self.pooler(lowercase )
# Change to NCHW output format to have uniformity in the modules
A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
A_ : List[str] = tf.transpose(lowercase , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
A_ : Dict = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )}
_UpperCAmelCase = r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
_UpperCAmelCase = r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
'''simple docstring'''
def __init__(self, config: RegNetConfig, *inputs, **kwargs):
    super().__init__(config, *inputs, **kwargs)
    self.regnet = TFRegNetMainLayer(config, name="regnet")
@unpack_inputs
@add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
    checkpoint=_CHECKPOINT_FOR_DOC,
    output_type=TFBaseModelOutputWithPoolingAndNoAttention,
    config_class=_CONFIG_FOR_DOC,
    modality="vision",
    expected_output=_EXPECTED_OUTPUT_SHAPE,
)
def call(
    self,
    pixel_values: tf.Tensor,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    training: bool = False,
) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    outputs = self.regnet(
        pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
    )
    if not return_dict:
        return (outputs[0],) + outputs[1:]

    return TFBaseModelOutputWithPoolingAndNoAttention(
        last_hidden_state=outputs.last_hidden_state,
        pooler_output=outputs.pooler_output,
        hidden_states=outputs.hidden_states,
    )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
'''simple docstring'''
def __init__(self, config: RegNetConfig, *inputs, **kwargs):
    super().__init__(config, *inputs, **kwargs)
    self.num_labels = config.num_labels
    self.regnet = TFRegNetMainLayer(config, name="regnet")
    # classification head
    self.classifier = [
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
    ]
@unpack_inputs
@add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
    checkpoint=_IMAGE_CLASS_CHECKPOINT,
    output_type=TFSequenceClassifierOutput,
    config_class=_CONFIG_FOR_DOC,
    expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
)
def call(
    self,
    pixel_values: Optional[tf.Tensor] = None,
    labels: Optional[tf.Tensor] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    training: bool = False,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict

    outputs = self.regnet(
        pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
    )

    pooled_output = outputs.pooler_output if return_dict else outputs[1]

    flattened_output = self.classifier[0](pooled_output)
    logits = self.classifier[1](flattened_output)

    loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

    if not return_dict:
        output = (logits,) + outputs[2:]
        return ((loss,) + output) if loss is not None else output

    return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
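# Illustrative usage sketch (the checkpoint matches _IMAGE_CLASS_CHECKPOINT above;
# the auto-processor class is an assumption for brevity):
#
#   from transformers import AutoImageProcessor, TFRegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="tf")
#   logits = model(**inputs).logits  # shape (batch_size, num_labels)
#   predicted_label = model.config.id2label[int(tf.argmax(logits, axis=-1)[0])]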
| 558 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number.
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
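# Worked example (illustrative, with a hypothetical checkpoint key): using
# offset=1 and original_name="mlp.fc1", the block number right before the
# matched segment is shifted down by the offset:
#
#   replace_key_with_offset("2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#   # -> "block.1.3.output.conv1.weight"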
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Prepare a test image of two cats from the COCO validation set."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original PoolFormer weights into the HuggingFace structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
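# Example invocation (script and checkpoint file names are assumptions for
# illustration):
#
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer-s12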
| 427 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
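# Illustrative note: with the lazy structure above, importing the package stays
# cheap; torch-dependent symbols are only materialized on first attribute
# access. For example (assuming the package is importable):
#
#   from transformers.models.swiftformer import SwiftFormerConfig
#   config = SwiftFormerConfig()  # configuration module loads here, not the torch model code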
| 427 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
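# Worked example of the normalization step (illustrative numbers): a pixel box
# [left, top, right, bottom] = [10, 20, 110, 220] on a 1000x2000 image maps to
# the 0-1000 coordinate scale LayoutLM models expect:
#
#   normalize_box([10, 20, 110, 220], width=1000, height=2000)
#   # -> [10, 10, 110, 110]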
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
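# Minimal usage sketch (the file name is a placeholder; requires the
# pytesseract backend to be installed when apply_ocr=True):
#
#   from PIL import Image
#   processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="np")
#   encoding["pixel_values"][0].shape  # (3, 224, 224)
#   encoding["words"], encoding["boxes"]  # OCR words and their normalized boxes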
| 413 |
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A = spec.loader.load_module()
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
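# Illustrative check of the checkpoint regex above (doctest-style, not executed
# by the script):
#
#   >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]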
| 346 | 0 |
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))
def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
PIPELINE_INIT_ARGS, r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''', )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__(self, **kwargs):
    super().__init__(**kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
    # Using "" as default argument because we're going to use `top_k=None` in user code to declare
    # "No top_k"
    preprocess_params = tokenizer_kwargs

    postprocess_params = {}
    if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
        return_all_scores = self.model.config.return_all_scores

    if isinstance(top_k, int) or top_k is None:
        postprocess_params["top_k"] = top_k
        postprocess_params["_legacy"] = False
    elif return_all_scores is not None:
        warnings.warn(
            "`return_all_scores` is now deprecated, if you want a similar functionality use `top_k=None` instead of"
            " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
            UserWarning,
        )
        if return_all_scores:
            postprocess_params["top_k"] = None
        else:
            postprocess_params["top_k"] = 1

    if isinstance(function_to_apply, str):
        function_to_apply = ClassificationFunction[function_to_apply.upper()]

    if function_to_apply is not None:
        postprocess_params["function_to_apply"] = function_to_apply
    return preprocess_params, {}, postprocess_params
def __call__(self, *args, **kwargs):
    result = super().__call__(*args, **kwargs)
    # TODO try and retrieve it in a nicer way from _sanitize_parameters.
    _legacy = "top_k" not in kwargs
    if isinstance(args[0], str) and _legacy:
        # This pipeline is odd, and returns a list when a single item is run
        return [result]
    else:
        return result
def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
    return_tensors = self.framework
    if isinstance(inputs, dict):
        return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
        # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
        return self.tokenizer(
            text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
        )
    elif isinstance(inputs, list):
        # This is likely an invalid usage of the pipeline attempting to pass text pairs.
        raise ValueError(
            "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
            ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
        )
    return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
def _forward(self, model_inputs):
    return self.model(**model_inputs)
def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
    # `_legacy` is used to determine if we're running the naked pipeline and in backward
    # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
    # the more natural result containing the list.
    # Default value before `set_parameters`
    if function_to_apply is None:
        if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
            function_to_apply = ClassificationFunction.SIGMOID
        elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
            function_to_apply = ClassificationFunction.SOFTMAX
        elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
            function_to_apply = self.model.config.function_to_apply
        else:
            function_to_apply = ClassificationFunction.NONE

    outputs = model_outputs["logits"][0]
    outputs = outputs.numpy()

    if function_to_apply == ClassificationFunction.SIGMOID:
        scores = sigmoid(outputs)
    elif function_to_apply == ClassificationFunction.SOFTMAX:
        scores = softmax(outputs)
    elif function_to_apply == ClassificationFunction.NONE:
        scores = outputs
    else:
        raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

    if top_k == 1 and _legacy:
        return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

    dict_scores = [
        {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
    ]
    if not _legacy:
        dict_scores.sort(key=lambda x: x["score"], reverse=True)
        if top_k is not None:
            dict_scores = dict_scores[:top_k]
    return dict_scores
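# Illustrative usage sketch (the checkpoint is an assumption; any sequence
# classification model works):
#
#   from transformers import pipeline
#   classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#   classifier("I love this!")                             # [{"label": "POSITIVE", "score": ...}]
#   classifier("I love this!", top_k=None)                 # scores for every label
#   classifier("I love this!", function_to_apply="none")   # raw logits, no sigmoid/softmax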
| 73 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolates the decimal part of a number.
    If digit_amount > 0, round to that decimal place; else return the entire decimal part.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 73 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
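# Minimal usage sketch (mirrors other HF config classes; the values shown are
# the defaults from __init__ above):
#
#   config = VivitConfig(num_frames=16)
#   config.hidden_size    # 768
#   config.tubelet_size   # [2, 16, 16]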
| 457 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = [np.asarray(A ) for speech_input in speech_inputs]
# Test feature size
lowerCamelCase = feature_extractor(A , padding=A , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
lowerCamelCase = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
lowerCamelCase = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCamelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCamelCase = np.asarray(A )
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
lowerCamelCase = feature_extractor(A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
    def test_cepstral_mean_and_variance_normalization(self):
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase = [None, 16, None]
for max_length, padding in zip(A , A ):
lowerCamelCase = feature_extractor(
A , padding=A , max_length=A , return_attention_mask=A )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def test_cepstral_mean_and_variance_normalization_np(self):
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = ["""longest""", """max_length""", """do_not_pad"""]
lowerCamelCase = [None, 16, None]
for max_length, padding in zip(A , A ):
lowerCamelCase = feature_extractor(
A , max_length=A , padding=A , return_tensors="""np""" , return_attention_mask=A )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = [np.sum(A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feature_extractor(
A , padding="""max_length""" , max_length=4 , truncation=A , return_tensors="""np""" , return_attention_mask=A , )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
'''simple docstring'''
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feature_extractor(
A , padding="""longest""" , max_length=4 , truncation=A , return_tensors="""np""" , return_attention_mask=A , )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
lowerCamelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCamelCase = feature_extractor(
A , padding="""longest""" , max_length=16 , truncation=A , return_tensors="""np""" , return_attention_mask=A , )
lowerCamelCase = inputs.input_features
lowerCamelCase = inputs.attention_mask
lowerCamelCase = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
    def test_double_precision_pad(self):
'''simple docstring'''
import torch
lowerCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCamelCase = np.random.rand(1_00 , 32 ).astype(np.floataa )
lowerCamelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCamelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
lowerCamelCase = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
| 457 | 1 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach the max limit (e.g. 15 kg) and i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight: 1 == weight[index] / weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
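# Worked example (illustrative): with profits [10, 20, 30], weights [2, 4, 6]
# and max_weight 8, every item has profit/weight 5.0, so ties are broken by
# index. Items 0 and 1 are taken fully (6 kg, profit 30) and 2/6 of item 2 is
# taken, for a total gain of 30 + (2/6) * 30 = 40.0:
#
#   calc_profit([10, 20, 30], [2, 4, 6], 8)  # -> 40.0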
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 713 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)

        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 72 | 0 |
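# Usage sketch (added, not part of the original file): composing a Pix2StructConfig
# from the two sub-configs above via `from_text_vision_configs`. Assumes the
# `transformers` library is installed; the tiny sizes are illustrative only.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, hidden_size=64, d_kv=16, num_heads=4)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, hidden_size=64, d_kv=16)

config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
assert config.to_dict()["model_type"] == "pix2struct"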
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 79 |
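# Quick numeric check (added) of ind_reactance. All three branches rearrange the
# same formula X_L = 2*pi*f*L for whichever argument is passed as 0:
print(ind_reactance(0, 10_000, 50))    # {'inductance': ...} ≈ 7.9577e-4 H
print(ind_reactance(0.035, 0, 50))     # {'frequency': ...} ≈ 227.36 Hz
print(ind_reactance(35e-6, 1_000, 0))  # {'reactance': ...} ≈ 0.2199 ohm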
import math


def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print octal equivalents of a few decimal numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
| 557 | 0 |
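# Cross-check (added): Python's built-in oct() produces the same "0o..." strings,
# so it can be used to sanity-test the digit-by-digit arithmetic above.
for n in (2, 8, 65, 216, 512):
    assert decimal_to_octal(n) == oct(n)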
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : List[str]
__UpperCAmelCase : Optional[str] =None
# Automatically constructed
__UpperCAmelCase : ClassVar[str] ="dict"
__UpperCAmelCase : ClassVar[Any] =None
__UpperCAmelCase : str =field(default="""Translation""" ,init=lowerCAmelCase__ ,repr=lowerCAmelCase__ )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def snake_case ( self ):
from .features import Value
return {k: Value("string" ) for k in sorted(self.languages )}
@dataclass
class _UpperCamelCase :
'''simple docstring'''
__UpperCAmelCase : Optional[List] =None
__UpperCAmelCase : Optional[int] =None
__UpperCAmelCase : Optional[str] =None
# Automatically constructed
__UpperCAmelCase : ClassVar[str] ="dict"
__UpperCAmelCase : ClassVar[Any] =None
__UpperCAmelCase : str =field(default="""TranslationVariableLanguages""" ,init=lowerCAmelCase__ ,repr=lowerCAmelCase__ )
def snake_case ( self ):
__lowerCAmelCase = sorted(set(self.languages ) ) if self.languages else None
__lowerCAmelCase = len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({"language": pa.list_(pa.string() ), "translation": pa.list_(pa.string() )} )
def snake_case ( self , __a ):
__lowerCAmelCase = set(self.languages )
if self.languages and set(__a ) - lang_set:
raise ValueError(
f"Some languages in example ({', '.join(sorted(set(__a ) - lang_set ) )}) are not in valid set ({', '.join(__a )})." )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__lowerCAmelCase = []
for lang, text in translation_dict.items():
if isinstance(__a , __a ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__lowerCAmelCase , __lowerCAmelCase = zip(*sorted(__a ) )
return {"language": languages, "translation": translations}
def snake_case ( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value("string" ) ),
"translation": Sequence(Value("string" ) ),
}
| 282 |
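# Usage sketch (added): how these feature types behave from the user side,
# assuming the `datasets` library is installed.
from datasets import Dataset, Features, Translation, TranslationVariableLanguages

features = Features({"translation": Translation(languages=["de", "en", "fr"])})
ds = Dataset.from_dict(
    {"translation": [{"de": "die katze", "en": "the cat", "fr": "le chat"}]}, features=features
)

# The variable-language form allows several alternatives per language; the output
# is sorted by (language, text), exactly as encode_example above implements it.
tvl = TranslationVariableLanguages(languages=["en", "fr"])
print(tvl.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]}))
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}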
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class _UpperCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , __a , __a , __a , __a , __a=1 , __a=False , **__a ):
super().__init__(**__a )
__lowerCAmelCase = vocab_size
__lowerCAmelCase = d_embed
__lowerCAmelCase = d_proj
__lowerCAmelCase = cutoffs + [vocab_size]
__lowerCAmelCase = [0] + self.cutoffs
__lowerCAmelCase = div_val
__lowerCAmelCase = self.cutoffs[0]
__lowerCAmelCase = len(self.cutoffs ) - 1
__lowerCAmelCase = self.shortlist_size + self.n_clusters
__lowerCAmelCase = keep_order
__lowerCAmelCase = []
__lowerCAmelCase = []
def snake_case ( self , __a ):
if self.n_clusters > 0:
__lowerCAmelCase = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=__a , name="cluster_weight" )
__lowerCAmelCase = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=__a , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__lowerCAmelCase = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=__a , name=f"out_projs_._{i}" , )
self.out_projs.append(__a )
else:
self.out_projs.append(__a )
__lowerCAmelCase = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._weight" , )
__lowerCAmelCase = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__lowerCAmelCase = self.d_embed // (self.div_val**i)
__lowerCAmelCase = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=__a , name=f"out_projs_._{i}" )
self.out_projs.append(__a )
__lowerCAmelCase = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._weight" , )
__lowerCAmelCase = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=__a , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
super().build(__a )
@staticmethod
def snake_case ( __a , __a , __a , __a=None ):
__lowerCAmelCase = x
if proj is not None:
__lowerCAmelCase = tf.einsum("ibd,ed->ibe" , __a , __a )
return tf.einsum("ibd,nd->ibn" , __a , __a ) + b
@staticmethod
def snake_case ( __a , __a ):
__lowerCAmelCase = shape_list(__a )
__lowerCAmelCase = tf.range(lp_size[0] , dtype=target.dtype )
__lowerCAmelCase = tf.stack([r, target] , 1 )
return tf.gather_nd(__a , __a )
def snake_case ( self , __a , __a , __a=True , __a=False ):
__lowerCAmelCase = 0
if self.n_clusters == 0:
__lowerCAmelCase = self._logit(__a , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__lowerCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__a , logits=__a )
__lowerCAmelCase = tf.nn.log_softmax(__a , axis=-1 )
else:
__lowerCAmelCase = shape_list(__a )
__lowerCAmelCase = []
__lowerCAmelCase = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__lowerCAmelCase , __lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__lowerCAmelCase = (target >= l_idx) & (target < r_idx)
__lowerCAmelCase = tf.where(__a )
__lowerCAmelCase = tf.boolean_mask(__a , __a ) - l_idx
if self.div_val == 1:
__lowerCAmelCase = self.out_layers[0][0][l_idx:r_idx]
__lowerCAmelCase = self.out_layers[0][1][l_idx:r_idx]
else:
__lowerCAmelCase = self.out_layers[i][0]
__lowerCAmelCase = self.out_layers[i][1]
if i == 0:
__lowerCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 )
__lowerCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 )
__lowerCAmelCase = self._logit(__a , __a , __a , self.out_projs[0] )
__lowerCAmelCase = tf.nn.log_softmax(__a )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__lowerCAmelCase = tf.boolean_mask(__a , __a )
__lowerCAmelCase = self._gather_logprob(__a , __a )
else:
__lowerCAmelCase = self._logit(__a , __a , __a , self.out_projs[i] )
__lowerCAmelCase = tf.nn.log_softmax(__a )
__lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster
__lowerCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__a )
if target is not None:
__lowerCAmelCase = tf.boolean_mask(__a , __a )
__lowerCAmelCase = tf.boolean_mask(__a , __a )
__lowerCAmelCase = self._gather_logprob(__a , __a )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__a , -cur_logprob , shape_list(__a ) )
__lowerCAmelCase = tf.concat(__a , axis=-1 )
if target is not None:
if return_mean:
__lowerCAmelCase = tf.reduce_mean(__a )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__a )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(__a , name=self.name , aggregation="mean" if return_mean else "" )
return out
| 282 | 1 |
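# Toy sketch (added): the probability factorization behind the adaptive softmax
# above. For a tail word, log p(word) = log p(cluster | h) + log p(word | cluster, h),
# which is exactly the `head_logprob[..., cluster_prob_idx, None] + tail_logprob` line.
import math

head = {"the": 0.5, "a": 0.3, "<cluster_1>": 0.2}  # shortlist plus one cluster token
tail_1 = {"zebra": 0.9, "quark": 0.1}              # distribution within cluster 1

log_p_zebra = math.log(head["<cluster_1>"]) + math.log(tail_1["zebra"])
assert abs(math.exp(log_p_zebra) - head["<cluster_1>"] * tail_1["zebra"]) < 1e-12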
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache below."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """Least-recently-used cache backed by a dict and a double linked list."""

    # class variable to map the decorator functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 |
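# Usage sketch (added) for the decorator above, in the style of the module's doctests:
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, memoized through the LRU cache
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)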
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 23 | 0 |
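# Hypothetical programmatic call (added) of the converter above; the three paths
# are placeholders for a locally downloaded ALBERT checkpoint, not real files.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="albert_base/model.ckpt-best",
    albert_config_file="albert_base/albert_config.json",
    pytorch_dump_path="albert_base/pytorch_model.bin",
)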
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
print('Welcome!' )
yield
print('Bye!' )
@contextlib.contextmanager
def context_fr():
print('Bonjour!' )
yield
print('Au revoir!' )
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec('transformers' ) is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
with ContextManagers([] ):
print('Transformers are awesome!' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_one_context(self, mock_stdout):
with ContextManagers([context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )
@unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def test_context_managers_two_context(self, mock_stdout):
with ContextManagers([context_fr(), context_en()] ):
print('Transformers are awesome!' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 688 |
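# Minimal sketch (added) of what the tests above exercise: ContextManagers
# enters a list of context managers in order and exits them in reverse,
# much like a contextlib.ExitStack.
from transformers.utils import ContextManagers

with ContextManagers([context_fr(), context_en()]):
    print("Transformers are awesome!")
# Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!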
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Return log10(n / df), optionally with add-one smoothing on df."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
| 688 | 1 |
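# Worked example (added) for the helpers above, on a corpus of three
# newline-separated documents:
corpus = "the cat sat\nthe dog sat\nthe cat ran"

tf = term_frequency("cat", "the cat sat")  # 1
df, n = document_frequency("cat", corpus)  # (2, 3)
idf = inverse_document_frequency(df, n)    # round(log10(3 / 2), 3) = 0.176
print(tf_idf(tf, idf))                     # 0.176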
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class __lowerCamelCase ( __snake_case , unittest.TestCase ):
'''simple docstring'''
snake_case__ : List[Any] = MvpTokenizer
snake_case__ : Optional[Any] = MvpTokenizerFast
snake_case__ : List[Any] = True
snake_case__ : Dict = filter_roberta_detectors
def a_ ( self ):
super().setUp()
__SCREAMING_SNAKE_CASE : Optional[int] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
__SCREAMING_SNAKE_CASE : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
__SCREAMING_SNAKE_CASE : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__SCREAMING_SNAKE_CASE : Optional[Any] = {'unk_token': '<unk>'}
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_lowerCAmelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_lowerCAmelCase ) )
def a_ ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def a_ ( self , **a__ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
def a_ ( self , a__ ):
return "lower newer", "lower newer"
@cached_property
def a_ ( self ):
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def a_ ( self ):
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def a_ ( self ):
__SCREAMING_SNAKE_CASE : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__SCREAMING_SNAKE_CASE : Tuple = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer(_lowerCAmelCase , max_length=len(_lowerCAmelCase ) , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__SCREAMING_SNAKE_CASE : str = batch.input_ids.tolist()[0]
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
# Test that special tokens are reset
@require_torch
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE : str = tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertNotIn("labels" , _lowerCAmelCase )
self.assertNotIn("decoder_attention_mask" , _lowerCAmelCase )
@require_torch
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE : List[str] = tokenizer(text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def a_ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Tuple = ['A long paragraph for summarization.']
__SCREAMING_SNAKE_CASE : Any = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(_lowerCAmelCase , text_target=_lowerCAmelCase , return_tensors="pt" )
__SCREAMING_SNAKE_CASE : Any = inputs['input_ids']
__SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def a_ ( self ):
pass
def a_ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = 'A, <mask> AllenNLP sentence.'
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.encode_plus(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
__SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__SCREAMING_SNAKE_CASE : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 211 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 66 | 0 |
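# File-free check (added) of change_brightness: brightness(c) reduces to
# c + level, so a small grayscale ramp shifts uniformly.
ramp = Image.new("L", (3, 1))
ramp.putdata([0, 100, 200])
print(list(change_brightness(ramp, 50).getdata()))  # [50, 150, 250]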
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : List[str] = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : Tuple = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : str = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : str = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : Any = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : Dict = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : int = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
class lowerCamelCase_ ( metaclass=lowercase ):
"""simple docstring"""
_lowerCAmelCase : List[str] = ["flax"]
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCAmelCase__ ( cls , *UpperCAmelCase__ , **UpperCAmelCase__ ):
requires_backends(cls , ["flax"] )
| 112 |
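# Minimal sketch (added) of the DummyObject pattern used above: the metaclass
# intercepts attribute access so that both instantiation and classmethod calls
# raise a clear "backend missing" error. Names here are illustrative, not the
# real diffusers classes.
class RequiresFlaxMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the `flax` library, which is not installed.")


class FlaxThingPlaceholder(metaclass=RequiresFlaxMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the `flax` library, which is not installed.")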
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCamelCase_ ( lowercase , lowercase ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = 1
@register_to_config
def __init__( self , UpperCAmelCase__=2000 , UpperCAmelCase__=0.1 , UpperCAmelCase__=20 , UpperCAmelCase__=1e-3 ):
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ = None ):
SCREAMING_SNAKE_CASE__ = torch.linspace(1 , self.config.sampling_eps , UpperCAmelCase__ , device=UpperCAmelCase__ )
def lowerCAmelCase__ ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None ):
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
# TODO(Patrick) better comments + non-PyTorch
# postprocess model score
SCREAMING_SNAKE_CASE__ = (
-0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
)
SCREAMING_SNAKE_CASE__ = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
SCREAMING_SNAKE_CASE__ = std.flatten()
while len(std.shape ) < len(score.shape ):
SCREAMING_SNAKE_CASE__ = std.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ = -score / std
# compute
SCREAMING_SNAKE_CASE__ = -1.0 / len(self.timesteps )
SCREAMING_SNAKE_CASE__ = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
SCREAMING_SNAKE_CASE__ = beta_t.flatten()
while len(beta_t.shape ) < len(x.shape ):
SCREAMING_SNAKE_CASE__ = beta_t.unsqueeze(-1 )
SCREAMING_SNAKE_CASE__ = -0.5 * beta_t * x
SCREAMING_SNAKE_CASE__ = torch.sqrt(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ = drift - diffusion**2 * score
SCREAMING_SNAKE_CASE__ = x + drift * dt
# add noise
SCREAMING_SNAKE_CASE__ = randn_tensor(x.shape , layout=x.layout , generator=UpperCAmelCase__ , device=x.device , dtype=x.dtype )
SCREAMING_SNAKE_CASE__ = x_mean + diffusion * math.sqrt(-dt ) * noise
return x, x_mean
def __len__( self ):
return self.config.num_train_timesteps
| 112 | 1 |
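# Scalar sketch (added) of the Euler-Maruyama update inside step_pred above:
# one deterministic drift step plus sqrt(-dt)-scaled Gaussian noise (dt is
# negative because the reverse SDE is integrated backwards in time).
import math
import random


def em_step(x: float, drift: float, diffusion: float, dt: float) -> float:
    x_mean = x + drift * dt
    return x_mean + diffusion * math.sqrt(-dt) * random.gauss(0.0, 1.0)


x = em_step(1.0, drift=-0.5, diffusion=0.3, dt=-1.0 / 1000)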
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase : List[Any] = DanceDiffusionPipeline
__lowerCAmelCase : Any = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
__lowerCAmelCase : List[str] = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
__lowerCAmelCase : List[str] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
__lowerCAmelCase : int = False
__lowerCAmelCase : List[Any] = False
def _a ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : List[str] = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_A , use_timestep_embedding=_A , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
UpperCamelCase : Tuple = IPNDMScheduler()
UpperCamelCase : int = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def _a ( self , _A , _A=0 ):
'''simple docstring'''
if str(_A ).startswith("""mps""" ):
UpperCamelCase : Tuple = torch.manual_seed(_A )
else:
UpperCamelCase : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
UpperCamelCase : List[Any] = {
"""batch_size""": 1,
"""generator""": generator,
"""num_inference_steps""": 4,
}
return inputs
def _a ( self ):
'''simple docstring'''
UpperCamelCase : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase : Optional[int] = self.get_dummy_components()
UpperCamelCase : Optional[int] = DanceDiffusionPipeline(**_A )
UpperCamelCase : List[Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCamelCase : str = self.get_dummy_inputs(_A )
UpperCamelCase : Any = pipe(**_A )
UpperCamelCase : List[str] = output.audios
UpperCamelCase : Dict = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase : Any = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _a ( self ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _a ( self ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def _a ( self ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _a ( self ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _a ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[str] = torch_device
UpperCamelCase : str = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
UpperCamelCase : Optional[int] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCamelCase : List[str] = torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = pipe(generator=_A , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
UpperCamelCase : Union[str, Any] = output.audios
UpperCamelCase : Dict = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase : Any = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = torch_device
UpperCamelCase : Any = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
UpperCamelCase : Union[str, Any] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCamelCase : Any = torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = pipe(generator=_A , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
UpperCamelCase : int = output.audios
UpperCamelCase : Optional[int] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase : Dict = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 102 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__snake_case :str =False
class lowerCAmelCase__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Dict ) -> List[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
A = torch.manual_seed(0 )
A = pipe.dual_guided(
prompt='first prompt' , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCamelCase )
A = VersatileDiffusionPipeline.from_pretrained(__UpperCamelCase , torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = generator.manual_seed(0 )
A = pipe.dual_guided(
prompt='first prompt' , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __UpperCamelCase ( self : Tuple ) -> List[str]:
A = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = 'cyberpunk 2077'
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
A = torch.manual_seed(0 )
A = pipe.dual_guided(
prompt=__UpperCamelCase , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
A = 'A painting of a squirrel eating a burger '
A = torch.manual_seed(0 )
A = pipe.text_to_image(
prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
A = pipe.image_variation(__UpperCamelCase , generator=__UpperCamelCase , output_type='numpy' ).images
A = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 106 | 0 |
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture.
    """
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # ----------------------------------
    # Make sure the outputs are identical
    # ----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 712 |
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Calculate the built-in voltage of a p-n junction diode."""
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | 0 |
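# Example (added): a silicon-like junction with N_D = N_A = 1e17 cm^-3 and
# n_i = 1e10 cm^-3 at T = 300 K gives V_bi = (kT/q) * ln(N_D * N_A / n_i^2)
# ≈ 0.0259 V * ln(1e14) ≈ 0.833 V.
print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))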
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 61 |
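# Direct use (added) of the underlying scorer, bypassing the Metric wrapper;
# same tokenized inputs as the docstring example above.
predictions = [["hello", "there", "general", "kenobi"]]
references = [[["hello", "there", "general", "kenobi"]]]
bleu, precisions, bp, ratio, translation_length, reference_length = compute_bleu(
    reference_corpus=references, translation_corpus=predictions, max_order=4, smooth=False
)
print(bleu)  # 1.0 for an exact match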
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
"""simple docstring"""
# initialize config
if "resnet-50" in model_name:
_SCREAMING_SNAKE_CASE = ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
_SCREAMING_SNAKE_CASE = ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
_SCREAMING_SNAKE_CASE = DetrConfig(use_timm_backbone=SCREAMING_SNAKE_CASE_ , backbone_config=SCREAMING_SNAKE_CASE_ )
# set label attributes
_SCREAMING_SNAKE_CASE = """panoptic""" in model_name
if is_panoptic:
_SCREAMING_SNAKE_CASE = 2_50
else:
_SCREAMING_SNAKE_CASE = 91
_SCREAMING_SNAKE_CASE = """huggingface/label-files"""
_SCREAMING_SNAKE_CASE = """coco-detection-id2label.json"""
_SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) )
    _SCREAMING_SNAKE_CASE = {int(k): v for k, v in idalabel.items()}
_SCREAMING_SNAKE_CASE = idalabel
_SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config, is_panoptic
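# Minimal sketch of the id2label/label2id round-trip built above; the example label names
# are assumptions for illustration, not values read from the hub file:
_example_idalabel = {"1": "person", "2": "bicycle"}
_example_idalabel = {int(k): v for k, v in _example_idalabel.items()}
_example_labelaid = {v: k for k, v in _example_idalabel.items()}
assert _example_labelaid["person"] == 1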
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> str:
"""simple docstring"""
# here we list all keys to be renamed (original name on the left, our name on the right)
_SCREAMING_SNAKE_CASE = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """"""
if is_panoptic:
_SCREAMING_SNAKE_CASE = """detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE = in_proj_weight[:2_56, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[:2_56]
_SCREAMING_SNAKE_CASE = in_proj_weight[2_56:5_12, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[2_56:5_12]
_SCREAMING_SNAKE_CASE = in_proj_weight[-2_56:, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[-2_56:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_SCREAMING_SNAKE_CASE = in_proj_weight[:2_56, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[:2_56]
_SCREAMING_SNAKE_CASE = in_proj_weight[2_56:5_12, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[2_56:5_12]
_SCREAMING_SNAKE_CASE = in_proj_weight[-2_56:, :]
_SCREAMING_SNAKE_CASE = in_proj_bias[-2_56:]
# read in weights + bias of input projection layer of cross-attention
_SCREAMING_SNAKE_CASE = state_dict.pop(
F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
_SCREAMING_SNAKE_CASE = state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[:2_56, :]
_SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[:2_56]
_SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[2_56:5_12, :]
_SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[2_56:5_12]
_SCREAMING_SNAKE_CASE = in_proj_weight_cross_attn[-2_56:, :]
_SCREAMING_SNAKE_CASE = in_proj_bias_cross_attn[-2_56:]
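# Sketch of the slicing above, assuming hidden size 256: PyTorch's MultiheadAttention keeps
# query, key and value stacked in one (3*256, 256) in_proj matrix, which the loop splits
# into three (256, 256) blocks, in q/k/v order.
import torch
_in_proj = torch.randn(3 * 256, 256)
_q, _k, _v = _in_proj[:256, :], _in_proj[256:512, :], _in_proj[-256:, :]
assert _q.shape == _k.shape == _v.shape == (256, 256)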
def lowerCAmelCase_ ( ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_SCREAMING_SNAKE_CASE = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = get_detr_config(SCREAMING_SNAKE_CASE_ )
# load original model from torch hub
_SCREAMING_SNAKE_CASE = {
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(F"Converting model {model_name}..." )
_SCREAMING_SNAKE_CASE = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=SCREAMING_SNAKE_CASE_ ).eval()
_SCREAMING_SNAKE_CASE = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(SCREAMING_SNAKE_CASE_ ):
if is_panoptic:
_SCREAMING_SNAKE_CASE = """detr.""" + src
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# query, key and value matrices need special treatment
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , is_panoptic=SCREAMING_SNAKE_CASE_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_SCREAMING_SNAKE_CASE = """detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
_SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
_SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_SCREAMING_SNAKE_CASE = state_dict.pop(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = val
# finally, create HuggingFace model and load state dict
_SCREAMING_SNAKE_CASE = DetrForSegmentation(SCREAMING_SNAKE_CASE_ ) if is_panoptic else DetrForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
# verify our conversion on an image
_SCREAMING_SNAKE_CASE = """coco_panoptic""" if is_panoptic else """coco_detection"""
_SCREAMING_SNAKE_CASE = DetrImageProcessor(format=SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = processor(images=prepare_img() , return_tensors="""pt""" )
_SCREAMING_SNAKE_CASE = encoding["""pixel_values"""]
_SCREAMING_SNAKE_CASE = detr(SCREAMING_SNAKE_CASE_ )
_SCREAMING_SNAKE_CASE = model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(F"nielsr/{model_name}" )
processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
UpperCamelCase__ : Any = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
UpperCamelCase__ : Any = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 591 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 384 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _UpperCamelCase ( __UpperCamelCase ) -> str: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _UpperCamelCase ( ) -> str:
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCamelCase_ = [1, 2, 3]
with pytest.raises(__UpperCamelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCamelCase ,__UpperCamelCase ,num_proc=2 )
with pytest.raises(__UpperCamelCase ):
with parallel_backend('unsupported backend' ):
map_nested(__UpperCamelCase ,__UpperCamelCase ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' ,[2, -1] )
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = [1, 2]
lowerCamelCase_ = {'a': 1, 'b': 2}
lowerCamelCase_ = {'a': [1, 2], 'b': [3, 4]}
lowerCamelCase_ = {'a': {'1': 1}, 'b': 2}
lowerCamelCase_ = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
lowerCamelCase_ = [2, 3]
lowerCamelCase_ = {'a': 2, 'b': 3}
lowerCamelCase_ = {'a': [2, 3], 'b': [4, 5]}
lowerCamelCase_ = {'a': {'1': 2}, 'b': 3}
lowerCamelCase_ = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(__UpperCamelCase ,__UpperCamelCase ,num_proc=__UpperCamelCase ) == expected_map_nested_sa
assert map_nested(__UpperCamelCase ,__UpperCamelCase ,num_proc=__UpperCamelCase ) == expected_map_nested_sa
assert map_nested(__UpperCamelCase ,__UpperCamelCase ,num_proc=__UpperCamelCase ) == expected_map_nested_sa
assert map_nested(__UpperCamelCase ,__UpperCamelCase ,num_proc=__UpperCamelCase ) == expected_map_nested_sa
assert map_nested(__UpperCamelCase ,__UpperCamelCase ,num_proc=__UpperCamelCase ) == expected_map_nested_sa
| 384 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict , _snake_case : List[str] , _snake_case : Optional[int]=7 , _snake_case : Optional[Any]=3 , _snake_case : Dict=18 , _snake_case : List[Any]=30 , _snake_case : List[Any]=400 , _snake_case : Tuple=True , _snake_case : Union[str, Any]=None , _snake_case : Any=True , ) -> Dict:
SCREAMING_SNAKE_CASE__ = size if size is not None else {"height": 18, "width": 18}
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = min_resolution
SCREAMING_SNAKE_CASE__ = max_resolution
SCREAMING_SNAKE_CASE__ = do_resize
SCREAMING_SNAKE_CASE__ = size
SCREAMING_SNAKE_CASE__ = do_normalize
def lowerCAmelCase_ ( self : List[str] ) -> List[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase (_SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a = ImageGPTImageProcessor if is_vision_available() else None
def lowerCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = ImageGPTImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Any ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : List[str] ) -> Any:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , "clusters" ) )
self.assertTrue(hasattr(_snake_case , "do_resize" ) )
self.assertTrue(hasattr(_snake_case , "size" ) )
self.assertTrue(hasattr(_snake_case , "do_normalize" ) )
def lowerCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def lowerCAmelCase_ ( self : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case , obj[key] ) )
else:
self.assertEqual(obj[key] , _snake_case )
def lowerCAmelCase_ ( self : Tuple ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = os.path.join(_snake_case , "image_processor.json" )
image_processor_first.to_json_file(_snake_case )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_json_file(_snake_case ).to_dict()
SCREAMING_SNAKE_CASE__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _snake_case )
def lowerCAmelCase_ ( self : Optional[int] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(_snake_case )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_pretrained(_snake_case ).to_dict()
SCREAMING_SNAKE_CASE__ = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(_snake_case , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , _snake_case )
@unittest.skip("ImageGPT requires clusters at initialization" )
def lowerCAmelCase_ ( self : Dict ) -> Any:
pass
def SCREAMING_SNAKE_CASE ( ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
SCREAMING_SNAKE_CASE__ = Image.open(dataset[4]["file"] )
SCREAMING_SNAKE_CASE__ = Image.open(dataset[5]["file"] )
SCREAMING_SNAKE_CASE__ = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : Optional[int] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
SCREAMING_SNAKE_CASE__ = prepare_images()
# test non-batched
SCREAMING_SNAKE_CASE__ = image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
SCREAMING_SNAKE_CASE__ = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , _snake_case )
# test batched
SCREAMING_SNAKE_CASE__ = image_processing(_snake_case , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
SCREAMING_SNAKE_CASE__ = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , _snake_case )
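# Illustrative sketch (an assumption about the processor's internals, for illustration
# only): each (r, g, b) pixel is assigned to its nearest color cluster, and the cluster
# index becomes the token id checked in the tests above.
import numpy as np
def _nearest_cluster_ids(pixels, clusters):
    # pixels: (n, 3) array in the clusters' color space; clusters: (k, 3) centroids
    distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return distances.argmin(axis=1)
_toy_clusters = np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
assert _nearest_cluster_ids(np.array([[0.9, 0.8, 1.0]]), _toy_clusters).tolist() == [1]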
| 159 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = image.size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
SCREAMING_SNAKE_CASE__ = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
SCREAMING_SNAKE_CASE__ = np.array(__UpperCAmelCase ).astype(np.floataa ) / 255.0
SCREAMING_SNAKE_CASE__ = image[None].transpose(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ = torch.from_numpy(__UpperCAmelCase )
return 2.0 * image - 1.0
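# Quick check of the rescaling above (added for illustration): pixel value 0 maps to -1.0
# and 255 maps to 1.0, i.e. images enter the pipeline in the [-1, 1] range.
assert 2.0 * (0 / 255.0) - 1.0 == -1.0
assert 2.0 * (255 / 255.0) - 1.0 == 1.0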
class lowerCamelCase (_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] , _snake_case : VQModel , _snake_case : UNetaDModel , _snake_case : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ) -> str:
super().__init__()
self.register_modules(vqvae=_snake_case , unet=_snake_case , scheduler=_snake_case )
@torch.no_grad()
def __call__( self : Tuple , _snake_case : Union[torch.Tensor, PIL.Image.Image] = None , _snake_case : Optional[int] = 1 , _snake_case : Optional[int] = 100 , _snake_case : Optional[float] = 0.0 , _snake_case : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _snake_case : Optional[str] = "pil" , _snake_case : bool = True , ) -> Union[Tuple, ImagePipelineOutput]:
if isinstance(_snake_case , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = 1
elif isinstance(_snake_case , torch.Tensor ):
SCREAMING_SNAKE_CASE__ = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_snake_case )}""" )
if isinstance(_snake_case , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = preprocess(_snake_case )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
SCREAMING_SNAKE_CASE__ = (batch_size, self.unet.config.in_channels // 2, height, width)
SCREAMING_SNAKE_CASE__ = next(self.unet.parameters() ).dtype
SCREAMING_SNAKE_CASE__ = randn_tensor(_snake_case , generator=_snake_case , device=self.device , dtype=_snake_case )
SCREAMING_SNAKE_CASE__ = image.to(device=self.device , dtype=_snake_case )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_snake_case , device=self.device )
SCREAMING_SNAKE_CASE__ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE__ = {}
if accepts_eta:
SCREAMING_SNAKE_CASE__ = eta
for t in self.progress_bar(_snake_case ):
# concat latents and low resolution image in the channel dimension.
SCREAMING_SNAKE_CASE__ = torch.cat([latents, image] , dim=1 )
SCREAMING_SNAKE_CASE__ = self.scheduler.scale_model_input(_snake_case , _snake_case )
# predict the noise residual
SCREAMING_SNAKE_CASE__ = self.unet(_snake_case , _snake_case ).sample
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(_snake_case , _snake_case , _snake_case , **_snake_case ).prev_sample
# decode the image latents with the VQVAE
SCREAMING_SNAKE_CASE__ = self.vqvae.decode(_snake_case ).sample
SCREAMING_SNAKE_CASE__ = torch.clamp(_snake_case , -1.0 , 1.0 )
SCREAMING_SNAKE_CASE__ = image / 2 + 0.5
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
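# Minimal usage sketch, assuming this class corresponds to diffusers' LDM super-resolution
# pipeline (the checkpoint id below is an assumption; substitute a real one):
#
#   import PIL.Image
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
#   upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]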
| 159 | 1 |
lowerCamelCase__ = {
"""joule""": 1.0,
"""kilojoule""": 1000,
"""megajoule""": 100_0000,
"""gigajoule""": 10_0000_0000,
"""wattsecond""": 1.0,
"""watthour""": 3600,
"""kilowatthour""": 360_0000,
"""newtonmeter""": 1.0,
"""calorie_nutr""": 4186.8,
"""kilocalorie_nutr""": 418_6800.00,
"""electronvolt""": 1.602176634e-19,
"""britishthermalunit_it""": 1055.0_5585,
"""footpound""": 1.35_5818,
}
def lowerCAmelCase__ ( from_type : str , to_type : str , value : float ):
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        __a = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(__a )
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
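# Worked examples (added for illustration) of the conversion above, value * from_factor / to_factor:
assert lowerCAmelCase__("kilowatthour", "joule", 1) == 360_0000  # 1 kWh = 3.6e6 J
assert lowerCAmelCase__("joule", "kilojoule", 1000) == 1.0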
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowerCamelCase__ = random.Random()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : int=1.0 , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Any=None ):
"""simple docstring"""
if rng is None:
__a = global_rng
__a = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Dict , __lowercase : Tuple , __lowercase : Tuple=7 , __lowercase : Optional[int]=400 , __lowercase : int=2000 , __lowercase : List[Any]=2048 , __lowercase : List[str]=128 , __lowercase : Union[str, Any]=1 , __lowercase : str=512 , __lowercase : List[str]=30 , __lowercase : Tuple=44100 , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = min_seq_length
__a = max_seq_length
__a = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__a = spectrogram_length
__a = feature_size
__a = num_audio_channels
__a = hop_length
__a = chunk_length
__a = sampling_rate
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCamelCase_ ( self : Optional[int] , __lowercase : Optional[int]=False , __lowercase : Any=False ):
'''simple docstring'''
def _flatten(__lowercase : Tuple ):
return list(itertools.chain(*__lowercase ) )
if equal_length:
__a = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__a = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__a = [np.asarray(__lowercase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Dict =TvltFeatureExtractor
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = TvltFeatureExtractionTester(self )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__lowercase , """spectrogram_length""" ) )
self.assertTrue(hasattr(__lowercase , """feature_size""" ) )
self.assertTrue(hasattr(__lowercase , """num_audio_channels""" ) )
self.assertTrue(hasattr(__lowercase , """hop_length""" ) )
self.assertTrue(hasattr(__lowercase , """chunk_length""" ) )
self.assertTrue(hasattr(__lowercase , """sampling_rate""" ) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = feat_extract_first.save_pretrained(__lowercase )[0]
check_json_file_has_correct_format(__lowercase )
__a = self.feature_extraction_class.from_pretrained(__lowercase )
__a = feat_extract_first.to_dict()
__a = feat_extract_second.to_dict()
__a = dict_first.pop("""mel_filters""" )
__a = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
__a = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__a = os.path.join(__lowercase , """feat_extract.json""" )
feat_extract_first.to_json_file(__lowercase )
__a = self.feature_extraction_class.from_json_file(__lowercase )
__a = feat_extract_first.to_dict()
__a = feat_extract_second.to_dict()
__a = dict_first.pop("""mel_filters""" )
__a = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(__lowercase , __lowercase ) )
self.assertEqual(__lowercase , __lowercase )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
# Initialize feature_extractor
__a = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__a = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a = [np.asarray(__lowercase ) for speech_input in speech_inputs]
# Test not batched input
__a = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__a = feature_extractor(__lowercase , return_tensors="""np""" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__a = feature_extractor(
__lowercase , return_tensors="""np""" , sampling_rate=44100 , mask_audio=__lowercase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__a = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a = np.asarray(__lowercase )
__a = feature_extractor(__lowercase , return_tensors="""np""" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : str ):
'''simple docstring'''
__a = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__a = ds.sort("""id""" ).select(range(__lowercase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self._load_datasamples(1 )
__a = TvltFeatureExtractor()
__a = feature_extractor(__lowercase , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__a = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , __lowercase , atol=1E-4 ) )
| 547 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
a : int = 1_6
a : Dict = 3_2
def _UpperCamelCase ( _A , _A = 1_6 , _A = "bert-base-cased" ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = AutoTokenizer.from_pretrained(_A )
_UpperCAmelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_A ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_A , max_length=_A )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase = datasets.map(
_A , batched=_A , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=_A )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_A ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_A , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(_A , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
_UpperCAmelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=_A , collate_fn=_A , batch_size=_A )
_UpperCAmelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_A , collate_fn=_A , batch_size=_A )
return train_dataloader, eval_dataloader
def _UpperCamelCase ( _A , _A , _A , _A ) -> Tuple:
"""simple docstring"""
model.eval()
_UpperCAmelCase = 0
for step, batch in enumerate(_A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCAmelCase = model(**_A )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_UpperCAmelCase ,_UpperCAmelCase = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_A ) - 1:
_UpperCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_UpperCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_A , references=_A , )
_UpperCAmelCase = metric.compute()
return eval_metric["accuracy"]
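# Why the truncation above matters (illustration): distributed samplers pad the final batch
# so every process sees the same number of samples; e.g. 10 eval examples on 4 processes
# gather to 12 predictions, and the trailing 2 duplicates are sliced off before scoring.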
def _UpperCamelCase ( _A , _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase = config["""lr"""]
_UpperCAmelCase = int(config["""num_epochs"""] )
_UpperCAmelCase = int(config["""seed"""] )
_UpperCAmelCase = int(config["""batch_size"""] )
_UpperCAmelCase = args.model_name_or_path
set_seed(_A )
_UpperCAmelCase ,_UpperCAmelCase = get_dataloaders(_A , _A , _A )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(_A , return_dict=_A )
# Instantiate optimizer
_UpperCAmelCase = (
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase = optimizer_cls(params=model.parameters() , lr=_A )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_UpperCAmelCase = 1
_UpperCAmelCase = (len(_A ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=_A , num_warmup_steps=0 , num_training_steps=_A , )
else:
_UpperCAmelCase = DummyScheduler(_A , total_num_steps=_A , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = accelerator.prepare(
_A , _A , _A , _A , _A )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase = 0
# We also need to keep track of the stating epoch so files are named properly
_UpperCAmelCase = 0
_UpperCAmelCase = evaluate.load("""glue""" , """mrpc""" )
_UpperCAmelCase = num_epochs
if args.partial_train_epoch is not None:
_UpperCAmelCase = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_UpperCAmelCase = args.resume_from_checkpoint.split("""epoch_""" )[1]
_UpperCAmelCase = """"""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_UpperCAmelCase = int(_A ) + 1
_UpperCAmelCase = evaluation_loop(_A , _A , _A , _A )
accelerator.print("""resumed checkpoint performance:""" , _A )
accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F"""state_{starting_epoch-1}.json""" ) , """r""" ) as f:
_UpperCAmelCase = json.load(_A )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_UpperCAmelCase = {}
for epoch in range(_A , _A ):
model.train()
for step, batch in enumerate(_A ):
_UpperCAmelCase = model(**_A )
_UpperCAmelCase = outputs.loss
_UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(_A )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_UpperCAmelCase = F"""epoch_{epoch}"""
_UpperCAmelCase = os.path.join(args.output_dir , _A )
accelerator.save_state(_A )
_UpperCAmelCase = evaluation_loop(_A , _A , _A , _A )
_UpperCAmelCase = accuracy
_UpperCAmelCase = lr_scheduler.get_lr()[0]
_UpperCAmelCase = optimizer.param_groups[0]["""lr"""]
_UpperCAmelCase = epoch
_UpperCAmelCase = overall_step
accelerator.print(F"""epoch {epoch}:""" , _A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F"""state_{epoch}.json""" ) , """w""" ) as f:
json.dump(_A , _A )
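# Sketch of the checkpoint-name parsing used in the resume branch above: the epoch number
# is read off the folder name and training restarts at the next epoch.
def _parse_resume_epoch(checkpoint_path):
    digits = ""
    for char in checkpoint_path.split("epoch_")[1]:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits) + 1
assert _parse_resume_epoch("outputs/epoch_3") == 4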
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
parser.add_argument(
"""--model_name_or_path""" , type=_A , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--output_dir""" , type=_A , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=_A , default=_A , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=_A , default=_A , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=_A , default=2 , help="""Number of train epochs.""" , )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6}
training_function(_A , _A )
if __name__ == "__main__":
main()
| 555 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
a : Any = logging.get_logger(__name__)
def _UpperCamelCase ( _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = RobertaPreLayerNormConfig.from_pretrained(
_A , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
_UpperCAmelCase = torch.load(hf_hub_download(repo_id=_A , filename="""pytorch_model.bin""" ) )
_UpperCAmelCase = {}
for tensor_key, tensor_value in original_state_dict.items():
# The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
_UpperCAmelCase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
_UpperCAmelCase = tensor_value
_UpperCAmelCase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_A , config=_A , state_dict=_A )
model.save_pretrained(_A )
# convert tokenizer
_UpperCAmelCase = AutoTokenizer.from_pretrained(_A )
tokenizer.save_pretrained(_A )
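# Illustration of the renaming above (the example key is an assumption about checkpoint
# contents): "roberta." prefixes become "roberta_prelayernorm." while the unused
# ".self.LayerNorm.{weight,bias}" tensors are dropped entirely.
_example_key = "roberta.encoder.layer.0.attention.self.query.weight"
assert "roberta_prelayernorm." + _example_key[len("roberta."):] == (
    "roberta_prelayernorm.encoder.layer.0.attention.self.query.weight"
)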
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Union[str, Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 555 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ['PoolFormerFeatureExtractor']
_lowerCAmelCase = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
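# Note (illustration): replacing sys.modules[__name__] with a _LazyModule defers importing
# torch-dependent submodules until an attribute is first accessed. A minimal sketch of the
# same idea using PEP 562's module-level __getattr__ (a simplification, not the actual
# _LazyModule implementation):
#
#   import importlib
#   def __getattr__(name):
#       for submodule, names in _import_structure.items():
#           if name in names:
#               return getattr(importlib.import_module("." + submodule, __name__), name)
#       raise AttributeError(name)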
| 719 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = 'ResNetConfig'
# Base docstring
_lowerCAmelCase = 'microsoft/resnet-50'
_lowerCAmelCase = [1, 2_0_4_8, 7, 7]
# Image classification docstring
_lowerCAmelCase = 'microsoft/resnet-50'
_lowerCAmelCase = 'tiger cat'
_lowerCAmelCase = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = 3 , __magic_name__ = 1 , __magic_name__ = "relu" ):
"""simple docstring"""
super().__init__()
A_ : Union[str, Any] = nn.Convad(
__magic_name__ , __magic_name__ , kernel_size=__magic_name__ , stride=__magic_name__ , padding=kernel_size // 2 , bias=__magic_name__ )
A_ : Union[str, Any] = nn.BatchNormad(__magic_name__ )
A_ : List[Any] = ACTaFN[activation] if activation is not None else nn.Identity()
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Union[str, Any] = self.convolution(__magic_name__ )
A_ : str = self.normalization(__magic_name__ )
A_ : Union[str, Any] = self.activation(__magic_name__ )
return hidden_state
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ ):
"""simple docstring"""
super().__init__()
A_ : Tuple = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
A_ : str = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
A_ : str = config.num_channels
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Optional[Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
A_ : Optional[int] = self.embedder(__magic_name__ )
A_ : Optional[Any] = self.pooler(__magic_name__ )
return embedding
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = 2 ):
"""simple docstring"""
super().__init__()
A_ : Dict = nn.Convad(__magic_name__ , __magic_name__ , kernel_size=1 , stride=__magic_name__ , bias=__magic_name__ )
A_ : Optional[Any] = nn.BatchNormad(__magic_name__ )
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Any = self.convolution(__magic_name__ )
A_ : List[str] = self.normalization(__magic_name__ )
return hidden_state
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = 1 , __magic_name__ = "relu" ):
"""simple docstring"""
super().__init__()
A_ : Union[str, Any] = in_channels != out_channels or stride != 1
A_ : str = (
ResNetShortCut(__magic_name__ , __magic_name__ , stride=__magic_name__ ) if should_apply_shortcut else nn.Identity()
)
A_ : Union[str, Any] = nn.Sequential(
ResNetConvLayer(__magic_name__ , __magic_name__ , stride=__magic_name__ ) , ResNetConvLayer(__magic_name__ , __magic_name__ , activation=__magic_name__ ) , )
A_ : Optional[Any] = ACTaFN[activation]
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : List[str] = hidden_state
A_ : Any = self.layer(__magic_name__ )
A_ : Dict = self.shortcut(__magic_name__ )
hidden_state += residual
A_ : Any = self.activation(__magic_name__ )
return hidden_state
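# Written out, the block above computes y = act(F(x) + shortcut(x)): F is the pair of
# conv-norm(-act) layers, and shortcut is the identity unless stride != 1 or the channel
# count changes, in which case a strided 1x1 conv (ResNetShortCut) matches the shapes.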
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ = 1 , __magic_name__ = "relu" , __magic_name__ = 4 ):
"""simple docstring"""
super().__init__()
A_ : Union[str, Any] = in_channels != out_channels or stride != 1
A_ : Optional[Any] = out_channels // reduction
A_ : Union[str, Any] = (
ResNetShortCut(__magic_name__ , __magic_name__ , stride=__magic_name__ ) if should_apply_shortcut else nn.Identity()
)
A_ : Optional[int] = nn.Sequential(
ResNetConvLayer(__magic_name__ , __magic_name__ , kernel_size=1 ) , ResNetConvLayer(__magic_name__ , __magic_name__ , stride=__magic_name__ ) , ResNetConvLayer(__magic_name__ , __magic_name__ , kernel_size=1 , activation=__magic_name__ ) , )
A_ : Optional[int] = ACTaFN[activation]
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Dict = hidden_state
A_ : Optional[int] = self.layer(__magic_name__ )
A_ : List[Any] = self.shortcut(__magic_name__ )
hidden_state += residual
A_ : str = self.activation(__magic_name__ )
return hidden_state
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = 2 , __magic_name__ = 2 , ):
"""simple docstring"""
super().__init__()
A_ : Dict = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
A_ : Optional[int] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , stride=__magic_name__ , activation=config.hidden_act ) , *[layer(__magic_name__ , __magic_name__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
A_ : Tuple = input
for layer in self.layers:
A_ : Optional[Any] = layer(__magic_name__ )
return hidden_state
class __UpperCAmelCase( nn.Module ):
"""simple docstring"""
def __init__( self , __magic_name__ ):
"""simple docstring"""
super().__init__()
A_ : Any = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
A_ : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__magic_name__ , config.depths[1:] ):
self.stages.append(ResNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ ) )
def UpperCAmelCase ( self , __magic_name__ , __magic_name__ = False , __magic_name__ = True ):
"""simple docstring"""
A_ : Optional[Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
A_ : Optional[Any] = hidden_states + (hidden_state,)
A_ : List[Any] = stage_module(__magic_name__ )
if output_hidden_states:
A_ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__magic_name__ , hidden_states=__magic_name__ , )
class __UpperCAmelCase( A__ ):
"""simple docstring"""
__magic_name__ = ResNetConfig
__magic_name__ = """resnet"""
__magic_name__ = """pixel_values"""
__magic_name__ = True
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
if isinstance(__magic_name__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(__magic_name__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def UpperCAmelCase ( self , __magic_name__ , __magic_name__=False ):
"""simple docstring"""
if isinstance(__magic_name__ , __magic_name__ ):
A_ : Tuple = value
RESNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

RESNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
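
if __name__ == "__main__":
    # Usage sketch for the classification model defined above. The
    # "microsoft/resnet-50" checkpoint and network access are assumptions of
    # this sketch, not requirements of the file itself.
    model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
    pixel_values = torch.randn(1, 3, 224, 224)  # stand-in for a preprocessed image
    with torch.no_grad():
        logits = model(pixel_values=pixel_values).logits
    print(model.config.id2label[int(logits.argmax(-1))])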
| 236 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
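
# The class above is the standard deprecation shim: an empty subclass that
# warns on construction and defers everything to its replacement. A
# self-contained sketch of the same pattern (all names here are hypothetical):
class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size


class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)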
| 533 | '''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
    '''num_class_embeds''': 1000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
    '''num_class_embeds''': 1000,
    '''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
    '''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
    '''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    '''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    '''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    # Map one fairseq-style ResNet block onto the diffusers key layout.
    new_checkpoint[f"""{new_prefix}.norm1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv1.weight"""] = checkpoint[f"""{old_prefix}.in_layers.2.weight"""]
    new_checkpoint[f"""{new_prefix}.conv1.bias"""] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.weight"""] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""]
    new_checkpoint[f"""{new_prefix}.time_emb_proj.bias"""] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""]
    new_checkpoint[f"""{new_prefix}.norm2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""]
    new_checkpoint[f"""{new_prefix}.norm2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""]
    new_checkpoint[f"""{new_prefix}.conv2.weight"""] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""]
    new_checkpoint[f"""{new_prefix}.conv2.bias"""] = checkpoint[f"""{old_prefix}.out_layers.3.bias"""]
    if has_skip:
        new_checkpoint[f"""{new_prefix}.conv_shortcut.weight"""] = checkpoint[f"""{old_prefix}.skip_connection.weight"""]
        new_checkpoint[f"""{new_prefix}.conv_shortcut.bias"""] = checkpoint[f"""{old_prefix}.skip_connection.bias"""]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    # Split the fused qkv projection and map the attention block onto diffusers keys.
    weight_q, weight_k, weight_v = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3, dim=0)
    new_checkpoint[f"""{new_prefix}.group_norm.weight"""] = checkpoint[f"""{old_prefix}.norm.weight"""]
    new_checkpoint[f"""{new_prefix}.group_norm.bias"""] = checkpoint[f"""{old_prefix}.norm.bias"""]
    new_checkpoint[f"""{new_prefix}.to_q.weight"""] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_q.bias"""] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_k.weight"""] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_k.bias"""] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_v.weight"""] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_v.bias"""] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"""{new_prefix}.to_out.0.weight"""] = (
        checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"""{new_prefix}.to_out.0.bias"""] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1).squeeze(-1)
    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"""down_blocks.{i}.resnets.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.0"""
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"""down_blocks.{i}.attentions.{j}"""
                old_prefix = f"""input_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"""down_blocks.{i}.downsamplers.0"""
            old_prefix = f"""input_blocks.{current_layer}.0"""
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"""up_blocks.{i}.resnets.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"""up_blocks.{i}.upsamplers.0"""
                old_prefix = f"""output_blocks.{current_layer-1}.1"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"""up_blocks.{i}.resnets.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.0"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"""up_blocks.{i}.attentions.{j}"""
                old_prefix = f"""output_blocks.{current_layer}.1"""
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"""up_blocks.{i}.upsamplers.0"""
                old_prefix = f"""output_blocks.{current_layer-1}.2"""
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    return new_checkpoint
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
__snake_case : List[str] = parser.parse_args()
__snake_case : Any = strabool(args.class_cond)
__snake_case : List[str] = os.path.basename(args.unet_path)
print(F'''Checkpoint: {ckpt_name}''')
# Get U-Net config
if "imagenet64" in ckpt_name:
__snake_case : Optional[int] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__snake_case : List[str] = TEST_UNET_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
if not args.class_cond:
__snake_case : Optional[Any] = None
__snake_case : Optional[int] = con_pt_to_diffuser(args.unet_path, unet_config)
__snake_case : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__snake_case : Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__snake_case : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__snake_case : Union[str, Any] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''')
__snake_case : Optional[Any] = CMStochasticIterativeScheduler(**scheduler_config)
__snake_case : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
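
# Sketch of a typical conversion-and-sampling round trip. The .pt filename is
# an assumption about the upstream consistency-model release; the one-step
# sampling call follows the documented diffusers pipeline API.
#
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt --dump_path ./cd_imagenet64 --class_cond True
#
#   from diffusers import ConsistencyModelPipeline
#   pipe = ConsistencyModelPipeline.from_pretrained("./cd_imagenet64")
#   image = pipe(num_inference_steps=1, class_labels=0).images[0]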
| 660 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
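
if __name__ == "__main__":
    # Construction sketch: pairing the config above with the matching
    # depth-estimation head from the same transformers release. The model is
    # randomly initialized; nothing is downloaded.
    from transformers import GLPNForDepthEstimation

    demo_model = GLPNForDepthEstimation(GLPNConfig(max_depth=10))
    print(sum(p.numel() for p in demo_model.parameters()), "parameters")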
| 708 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowerCAmelCase = """sshleifer/bart-tiny-random"""
lowerCAmelCase = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def A( self):
return AutoConfig.from_pretrained(lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Union[str, Any] = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Tuple = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=lowercase__)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def A( self):
__UpperCAmelCase , *__UpperCAmelCase : Dict = create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def A( self):
with self.assertRaises(lowercase__):
create_student_by_copying_alternating_layers(lowercase__ , tempfile.mkdtemp() , e=lowercase__ , d=lowercase__)
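
if __name__ == "__main__":
    # Direct call outside the unittest harness (a sketch; downloads the tiny
    # checkpoint named above on first use).
    student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
    print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1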
| 675 | 0 |
def least_divisible_repunit(divisor: int) -> int:
    # A(n): the least k such that the repunit R(k) is divisible by n.
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    # Smallest odd divisor n (coprime to 10) with A(n) > limit.
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 3 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    return_name = "generated"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"""
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""")

        if input_length < max_length:
            logger.warning(
                f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"""
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
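
if __name__ == "__main__":
    # End-user view of the pipelines above through the `pipeline` factory. The
    # checkpoint names are common public ones; network access is assumed.
    from transformers import pipeline

    summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
    translator = pipeline("translation_en_to_de", model="t5-small")
    print(summarizer("Transformers provides thousands of pretrained models. " * 5, max_length=20)[0]["summary_text"])
    print(translator("Machine learning is great!")[0]["translation_text"])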
| 3 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
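
if __name__ == "__main__":
    # Standalone look at the deterministic dummy tensors used above (a sketch;
    # floats_tensor fills a tensor with pseudo-random values in [0, 1)).
    demo = floats_tensor((1, 3, 16, 16), rng=random.Random(0))
    assert demo.shape == (1, 3, 16, 16)
    assert 0.0 <= float(demo.min()) and float(demo.max()) < 1.0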
| 707 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
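
# One-off invocation of the helper above, writing a single extra card into a
# throwaway temporary directory.
import tempfile

write_model_card(tempfile.mkdtemp(), src_lang="en", tgt_lang="ru")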
| 311 | 0 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: returns the primes below max_number.
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    # Counts hybrid integers p**q * q**p (p, q distinct primes) <= base**degree,
    # working in log2 space: p*log2(q) + q*log2(p) <= degree*log2(base).
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f'{solution() = }')
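
# Quick sanity checks: the sieve below 30, plus the canonical hybrid-integer
# example 800 = 2**5 * 5**2 (p**q * q**p with p = 2, q = 5).
assert calculate_prime_numbers(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert 2**5 * 5**2 == 800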
| 2 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols('''ct x y z''')
def beta(velocity: float) -> float:
    # beta = v / c, the velocity as a fraction of the speed of light.
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c
def gamma(velocity: float) -> float:
    # Lorentz factor: gamma = 1 / sqrt(1 - beta^2).
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    # Lorentz boost along the x axis.
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )
def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29_979_245)
print('''Example of four vector: ''')
print(F'''ct\' = {four_vector[0]}''')
print(F'''x\' = {four_vector[1]}''')
print(F'''y\' = {four_vector[2]}''')
print(F'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F'''\n{numerical_vector}''')
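
# Sanity check: at v = c/2, gamma = 1 / sqrt(1 - 0.25) ≈ 1.1547.
assert abs(gamma(0.5 * c) - 1.1547) < 1e-3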
| 276 | 0 |
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """
    Check whether the module was compiled with torch.compile().
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """
    Extract a model from its distributed containers.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model
def wait_for_everyone():
    """
    Introduces a blocking point in the script, making sure all processes have reached this point before continuing.
    """
    PartialState().wait_for_everyone()


def save(obj, f):
    """
    Save the data to disk. Use in place of `torch.save()`.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """
    A context manager that adds each keyword argument to `os.environ` and removes it on exit.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """
    Gets a pretty name for an object: its qualified name, plain name, or string representation.
    """
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """
    Recursively merges two dictionaries.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination
def is_port_in_use(port: int = None) -> bool:
    """
    Checks if a port is in use on `localhost`.
    """
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
| 700 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 344 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Merge two bit-strings that differ in exactly one position; otherwise False.
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # Pick essential prime implicants: columns covered by exactly one row.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily cover the remaining columns with the row that covers the most.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
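
# Worked example for the helpers above: with integer minterms the bit-strings
# come out plain (5 -> "101" over three variables), and an implicant with one
# '_' covers exactly the strings that differ from it in that position.
assert decimal_to_binary(3, [5, 7]) == ["101", "111"]
assert is_for_table("1_1", "111", 1) and is_for_table("1_1", "101", 1)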
| 43 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
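
# Net effect of the lazy-module pattern above (a sketch): attribute access on
# the package triggers the real submodule import only at first use, e.g.
#
#   from transformers import BridgeTowerConfig, BridgeTowerProcessor
#
# resolves through the `_import_structure` mapping defined in this file.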
| 47 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images for the tests below."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
__lowerCamelCase: Any = self.get_image_processor()
__lowerCamelCase: Tuple = self.get_tokenizer()
__lowerCamelCase: Optional[Any] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__lowerCamelCase: Optional[int] = self.prepare_image_inputs()
__lowerCamelCase: int = image_processor(_UpperCAmelCase , return_tensors="""np""" )
__lowerCamelCase: Dict = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
__lowerCamelCase: Union[str, Any] = self.get_image_processor()
__lowerCamelCase: List[str] = self.get_tokenizer()
__lowerCamelCase: Optional[int] = CLIPProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
__lowerCamelCase: Optional[Any] = '''lower newer'''
__lowerCamelCase: List[Any] = processor(text=_UpperCAmelCase )
__lowerCamelCase: Optional[Any] = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
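# For orientation, a minimal sketch of the processor round-trip the tests above
# exercise (names follow the transformers CLIPProcessor API; the concrete
# checkpoint below is an assumption for illustration, not part of the test file):
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   batch = processor(text="a photo of a cat", images=image, return_tensors="pt")
#   # -> dict with input_ids, attention_mask and pixel_values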
| 713 |
from manim import *
class a(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)
__lowerCamelCase: Tuple = []
__lowerCamelCase: Any = []
__lowerCamelCase: int = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
rect.set_stroke(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: int = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=SCREAMING_SNAKE_CASE_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
self.add(SCREAMING_SNAKE_CASE_ )
model_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = [mem.copy() for i in range(6 )]
__lowerCamelCase: Any = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Tuple = Text("""Loaded Checkpoint""" , font_size=24 )
__lowerCamelCase: Tuple = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = []
__lowerCamelCase: Optional[int] = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase: Optional[int] = fill.copy().set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7 )
target.move_to(SCREAMING_SNAKE_CASE_ )
ckpt_arr.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(SCREAMING_SNAKE_CASE_ )
self.add(*SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[Any] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowerCamelCase: Dict = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[int] = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(SCREAMING_SNAKE_CASE_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Tuple = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
__lowerCamelCase: List[Any] = [meta_mem.copy() for i in range(6 )]
__lowerCamelCase: Optional[int] = [meta_mem.copy() for i in range(6 )]
__lowerCamelCase: str = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Optional[int] = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: List[str] = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__lowerCamelCase: Dict = Text("""Disk""" , font_size=24 )
__lowerCamelCase: Any = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) , Write(SCREAMING_SNAKE_CASE_ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) )
__lowerCamelCase: int = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase: Optional[int] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE_ )
self.play(FadeOut(SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase: List[Any] = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=3 ) )
self.play(
FadeOut(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) , )
self.wait()
| 189 | 0 |
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Smallest index i with l < i <= r such that v[i] >= key (binary search)."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence of v, in O(n log n)."""
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
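# Illustrative check (example values chosen for this sketch, not from the source):
#   longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
#   (one such subsequence is 2, 3, 7, 8, 10, 13)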
| 638 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    """Output: the (batch * frames, channels, height, width) sample tensor."""

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    """A transformer applied over the frame axis of a video latent."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ) -> None:
"""simple docstring"""
super().__init__()
snake_case : Optional[int] = num_attention_heads
snake_case : int = attention_head_dim
snake_case : Dict = num_attention_heads * attention_head_dim
snake_case : Tuple = in_channels
snake_case : Optional[int] = torch.nn.GroupNorm(num_groups=UpperCamelCase__ , num_channels=UpperCamelCase__ , eps=1e-6 , affine=UpperCamelCase__ )
snake_case : List[Any] = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
# 3. Define transformers blocks
snake_case : Union[str, Any] = nn.ModuleList(
[
BasicTransformerBlock(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , cross_attention_dim=UpperCamelCase__ , activation_fn=UpperCamelCase__ , attention_bias=UpperCamelCase__ , double_self_attention=UpperCamelCase__ , norm_elementwise_affine=UpperCamelCase__ , )
for d in range(UpperCamelCase__ )
] )
snake_case : Any = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input: fold the frame axis out of the batch axis
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output: restore the original layout and add the residual
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
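# A minimal smoke test for the module above (a sketch; the tiny sizes are
# assumptions for illustration, and BasicTransformerBlock must accept the
# arguments used in __init__):
#
#   model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=16)
#   x = torch.randn(4, 16, 8, 8)   # 2 videos x 2 frames, 16 channels, 8x8 latents
#   out = model(x, num_frames=2).sample
#   assert out.shape == x.shape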
| 638 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m', '--pretrained_model_name_or_path', type=str, default=None, required=True, help='Path to pretrained model or model identifier from huggingface.co/models.', )
    parser.add_argument(
        '-c', '--caption', type=str, default='robotic cat with wings', help='Text used to generate images.', )
    parser.add_argument(
        '-n', '--images_num', type=int, default=4, help='How many images to generate.', )
    parser.add_argument(
        '-s', '--seed', type=int, default=42, help='Seed for random process.', )
    parser.add_argument(
        '-ci', '--cuda_id', type=int, default=0, help='cuda_id.', )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size
    grid = Image.new('RGB', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(
    pipeline, prompt='robotic cat with wings', guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
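# Illustrative invocation (the script name and model path are placeholders,
# not taken from the source):
#   python generate_images.py -m ./stable-diffusion-ckpt -c "robotic cat with wings" -n 4 -s 42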
| 720 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep='\t', header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, 'r').readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f'F1: {f1:.2f}')
    logger.info(f'EM: {em:.2f}')
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, 'r').readlines()]
    references = [line.strip() for line in open(gold_data_path, 'r').readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split('\t')[:k])
        ref_provenance = set(reference.split('\t'))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f'Precision@{k}: {em: .2f}')
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors='pt', padding=True, truncation=True,
    )['input_ids'].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors='pt', )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs['title']]
        provenance_strings.append('\t'.join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors='pt', padding=True, truncation=True )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info('Q: {} - A: {}'.format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type', choices=['rag_sequence', 'rag_token', 'bart'], type=str, help=(
            'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
            ' model_name_or_path'
        ), )
    parser.add_argument(
        '--index_name', default=None, choices=['exact', 'compressed', 'legacy'], type=str, help='RAG model retriever type', )
    parser.add_argument(
        '--index_path', default=None, type=str, help='Path to the retrieval index', )
    parser.add_argument('--n_docs', default=5, type=int, help='Number of retrieved docs')
    parser.add_argument(
        '--model_name_or_path', default=None, type=str, required=True, help='Path to pretrained checkpoints or model identifier from huggingface.co/models', )
    parser.add_argument(
        '--eval_mode', choices=['e2e', 'retrieval'], default='e2e', type=str, help=(
            'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
            ' precision@k.'
        ), )
    parser.add_argument('--k', default=1, type=int, help='k for the precision@k calculation')
    parser.add_argument(
        '--evaluation_set', default=None, type=str, required=True, help='Path to a file containing evaluation samples', )
    parser.add_argument(
        '--gold_data_path', default=None, type=str, required=True, help='Path to a tab-separated file with gold samples', )
    parser.add_argument(
        '--gold_data_mode', default='qa', type=str, choices=['qa', 'ans'], help=(
            'Format of the gold data file'
            'qa - a single line in the following format: question [tab] answer_list'
            'ans - a single line of the gold file contains the expected answer string'
        ), )
    parser.add_argument(
        '--predictions_path', type=str, default='predictions.txt', help='Name of the predictions file, to be stored in the checkpoints directory', )
    parser.add_argument(
        '--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number', )
    parser.add_argument(
        '--eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.', )
    parser.add_argument(
        '--recalculate', help='Recalculate predictions even if the prediction file exists', action='store_true', )
    parser.add_argument(
        '--num_beams', default=4, type=int, help='Number of beams to be used when generating answers', )
    parser.add_argument('--min_length', default=1, type=int, help='Min length of the generated answers')
    parser.add_argument('--max_length', default=50, type=int, help='Max length of the generated answers')

    parser.add_argument(
        '--print_predictions', action='store_true', help='If True, prints predictions while evaluating.', )
    parser.add_argument(
        '--print_docs', action='store_true', help='If True, prints docs retrieved while generating.', )
    args = parser.parse_args()
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith('rag'):
        model_class = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
        model_kwargs['n_docs'] = args.n_docs
        if args.index_name is not None:
            model_kwargs['index_name'] = args.index_name
        if args.index_path is not None:
            model_kwargs['index_path'] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info('Evaluate the following checkpoints: %s', checkpoints)

    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == 'e2e' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info('***** Running evaluation for {} *****'.format(checkpoint))
        logger.info('  Batch size = %d', args.eval_batch_size)
        logger.info('  Predictions will be stored under {}'.format(args.predictions_path))

        if args.model_type.startswith('rag'):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, 'r') as eval_file, open(args.predictions_path, 'w') as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write('\n'.join(answers) + '\n')
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write('\n'.join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
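# Illustrative invocation (the script and file names are placeholders,
# not taken from the source):
#   python eval_rag.py --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence --eval_mode e2e \
#       --evaluation_set dev.questions --gold_data_path dev.gold \
#       --predictions_path predictions.txt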
| 52 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = 'backbone.' if is_semantic else ''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', 'beit.embeddings.cls_token'),
(f'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'),
(f'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'),
(f'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = 'backbone.' if is_semantic else ''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight')
        q_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias')

        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.query.bias'] = q_bias
        state_dict[f'beit.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'beit.encoder.layer.{i}.attention.attention.value.bias'] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'{prefix}blocks.{i}.gamma_1')
        gamma_2 = state_dict.pop(f'{prefix}blocks.{i}.gamma_2')
        state_dict[f'beit.encoder.layer.{i}.lambda_1'] = gamma_1
        state_dict[f'beit.encoder.layer.{i}.lambda_2'] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> str:
A__ = False if 'rvlcdip' in checkpoint_url else True
A__ = BeitConfig(use_absolute_position_embeddings=__UpperCamelCase , use_mask_token=__UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
A__ = 1_024
A__ = 4_096
A__ = 24
A__ = 16
# labels
if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = 'huggingface/label-files'
        filename = 'rvlcdip-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
# load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors='pt')
    pixel_values = encoding['pixel_values']

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            model_name = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
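# Illustrative invocation (the script name and output path are placeholders,
# not taken from the source; the URL is the script's default):
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base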
| 9 |
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
"""simple docstring"""
A__ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=lowerCamelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=lowerCamelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
    def test_sequence_builders(self):
"""simple docstring"""
A__ = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
A__ = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCamelCase__ )
A__ = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCamelCase__ )
A__ = tokenizer.encode(
"""sequence builders""" , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
A__ = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
A__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
A__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ , lowerCamelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = """Encode this sequence."""
A__ = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
A__ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
A__ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
A__ = tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
# Testing spaces after special tokens
A__ = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ )} ) # mask token has a left space
A__ = tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
A__ = """Encode <mask> sequence"""
A__ = """Encode <mask>sequence"""
A__ = tokenizer.encode(lowerCamelCase__ )
A__ = encoded.index(lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
A__ = tokenizer.encode(lowerCamelCase__ )
A__ = encoded.index(lowerCamelCase__ )
A__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase__ , lowerCamelCase__ )
    def test_pretokenized_inputs(self):
"""simple docstring"""
pass
    def test_embeded_special_tokens(self):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ = self.rust_tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
A__ = self.tokenizer_class.from_pretrained(lowerCamelCase__ , **lowerCamelCase__ )
A__ = """A, <mask> AllenNLP sentence."""
A__ = tokenizer_r.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
A__ = tokenizer_p.encode_plus(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
A__ = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
A__ = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowerCamelCase__ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
A__ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
A__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , lowerCamelCase__ )
self.assertEqual(post_processor_state["""add_prefix_space"""] , lowerCamelCase__ )
self.assertEqual(post_processor_state["""trim_offsets"""] , lowerCamelCase__ )
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
"""simple docstring"""
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A__ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
A__ = F"""{text_of_1_token} {text_of_1_token}"""
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ) + 1, len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCamelCase__ ), len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = F""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ) + 1, 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
A__ = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase__ , use_fast=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ )
A__ = tokenizer_r(lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCamelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCamelCase__ ), 1 + len(lowerCamelCase__ ) + 1 + len(lowerCamelCase__ )) , )
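# For intuition, the offset behavior checked above (an assumed illustration,
# not part of the test file): with add_prefix_space=True and trim_offsets=True,
# tokenizing "hello hello" yields character spans (0, 5) and (6, 11); the
# leading space of the second token is trimmed from its reported offsets.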
| 574 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function='relu',
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type='sine',
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.')
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serializes this instance to a dict, including the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
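# A minimal usage sketch for the config above (values are illustrative):
#   config = DetaConfig(num_queries=300)
#   assert config.num_attention_heads == 8 and config.hidden_size == 256
#   config.to_dict()['backbone_config']['model_type']  # -> 'resnet'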
| 712 |
def solution(limit: int = 1_000_000) -> int:
    """Sum of Euler's totient phi(i) for 2 <= i <= limit, via a totient sieve."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
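# Illustrative check (computed by hand, not from the source): solution(8) == 21,
# the number of reduced proper fractions n/d with d <= 8 (Project Euler 72 style).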
| 636 | 0 |
"""simple docstring"""
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
        frequencies = {
"""a""": 0.0_84_97,
"""b""": 0.0_14_92,
"""c""": 0.0_22_02,
"""d""": 0.0_42_53,
"""e""": 0.1_11_62,
"""f""": 0.0_22_28,
"""g""": 0.0_20_15,
"""h""": 0.0_60_94,
"""i""": 0.0_75_46,
"""j""": 0.0_01_53,
"""k""": 0.0_12_92,
"""l""": 0.0_40_25,
"""m""": 0.0_24_06,
"""n""": 0.0_67_49,
"""o""": 0.0_75_07,
"""p""": 0.0_19_29,
"""q""": 0.0_00_95,
"""r""": 0.0_75_87,
"""s""": 0.0_63_27,
"""t""": 0.0_93_56,
"""u""": 0.0_27_58,
"""v""": 0.0_09_78,
"""w""": 0.0_25_60,
"""x""": 0.0_01_50,
"""y""": 0.0_19_94,
"""z""": 0.0_00_77,
}
else:
# Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""
# decrypt the message with the shift
for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
        chi_squared_statistic = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
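# A minimal usage sketch (illustrative ciphertext: "hello world" shifted by 3):
#   shift, chi2, plaintext = decrypt_caesar_with_chi_squared('khoor zruog')
#   # expected to recover shift 3 and plaintext 'hello world'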
| 102 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
    image = Image.open(requests.get(url, stream=True).raw).convert('RGB')
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``."""
    val = dct.pop(old)
    dct[new] = val
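# e.g. rename_key(state_dict, 'visual_encoder.cls_token',
#                 'vision_model.embeddings.class_embedding') moves that tensor to its new key.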
def read_in_q_v_bias(state_dict, config):
    """Fuse the separate q and v biases into the single qkv bias Transformers expects."""
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''')
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'''visual_encoder.blocks.{i}.attn.qkv.bias'''] = qkv_bias
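# The biases are concatenated as (q_bias, zeros, v_bias) because the fused qkv
# projection in the original vision encoder carries no bias on its key component.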
def get_blipa_config(model_name, eos_token_id=None):
    """Build the BLIP-2 config matching the given checkpoint name."""
    image_size = 364 if 'coco' in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the original BLIP-2 weights into the Transformers layout."""
__UpperCamelCase = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
__UpperCamelCase = tokenizer('\n' , add_special_tokens=_lowercase ).input_ids[0]
__UpperCamelCase, __UpperCamelCase = get_blipa_config(_lowercase , eos_token_id=_lowercase )
__UpperCamelCase = BlipaForConditionalGeneration(_lowercase ).eval()
__UpperCamelCase = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__UpperCamelCase, __UpperCamelCase = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__UpperCamelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase = load_model_and_preprocess(
name=_lowercase , model_type=_lowercase , is_eval=_lowercase , device=_lowercase )
original_model.eval()
print('Done!' )
# update state dict keys
__UpperCamelCase = original_model.state_dict()
__UpperCamelCase = create_rename_keys(_lowercase )
for src, dest in rename_keys:
rename_key(_lowercase , _lowercase , _lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCamelCase = state_dict.pop(_lowercase )
if key.startswith('Qformer.bert' ):
__UpperCamelCase = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__UpperCamelCase = key.replace('self' , 'attention' )
if "opt_proj" in key:
__UpperCamelCase = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__UpperCamelCase = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__UpperCamelCase = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__UpperCamelCase = key.replace('t5' , 'language' )
__UpperCamelCase = val
# read in qv biases
read_in_q_v_bias(_lowercase , _lowercase )
__UpperCamelCase, __UpperCamelCase = hf_model.load_state_dict(_lowercase , strict=_lowercase )
assert len(_lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__UpperCamelCase = load_demo_image()
__UpperCamelCase = vis_processors['eval'](_lowercase ).unsqueeze(0 ).to(_lowercase )
__UpperCamelCase = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(_lowercase )
# create processor
__UpperCamelCase = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=_lowercase , image_std=_lowercase )
__UpperCamelCase = BlipaProcessor(image_processor=_lowercase , tokenizer=_lowercase )
__UpperCamelCase = processor(images=_lowercase , return_tensors='pt' ).pixel_values.to(_lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowercase , _lowercase )
original_model.to(_lowercase )
hf_model.to(_lowercase )
with torch.no_grad():
if "opt" in model_name:
__UpperCamelCase = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__UpperCamelCase = hf_model(_lowercase , _lowercase ).logits
else:
__UpperCamelCase = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__UpperCamelCase = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__UpperCamelCase = hf_model(_lowercase , _lowercase , labels=_lowercase ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__UpperCamelCase = torch.tensor(
[[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]] , device=_lowercase )
assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__UpperCamelCase = torch.tensor(
[[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]] , device=_lowercase )
else:
# cast to same type
__UpperCamelCase = logits.dtype
assert torch.allclose(original_logits.to(_lowercase ) , _lowercase , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
__UpperCamelCase = ''
__UpperCamelCase = tokenizer(_lowercase , return_tensors='pt' ).input_ids.to(_lowercase )
__UpperCamelCase = original_model.generate({'image': original_pixel_values} )
__UpperCamelCase = hf_model.generate(
_lowercase , _lowercase , do_sample=_lowercase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , _lowercase )
__UpperCamelCase = input_ids.shape[1]
__UpperCamelCase = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowercase )
__UpperCamelCase = [text.strip() for text in output_text]
print('HF generation:' , _lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if push_to_hub:
processor.push_to_hub(f'''nielsr/{model_name}''' )
hf_model.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
__snake_case = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
__snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
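    # Illustrative invocation (the script filename is an assumption; flags come from the parser above):
    #   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub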
| 1 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {'''vocab_file''': '''spiece.model'''}
a__ = {
'''vocab_file''': {
'''bert_for_seq_generation''': (
'''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
),
}
}
a__ = {'''bert_for_seq_generation''': 512}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
def __init__( self , _a , _a="<s>" , _a="</s>" , _a="<unk>" , _a="<pad>" , _a="<::::>" , _a = None , **_a , ) -> None:
_a : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_a , eos_token=_a , unk_token=_a , pad_token=_a , sep_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
_a : List[str] = vocab_file
_a : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __lowercase ( self ) -> Any:
return self.sp_model.get_piece_size()
def __lowercase ( self ) -> List[Any]:
_a : Optional[Any] = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Any:
_a : Union[str, Any] = self.__dict__.copy()
_a : Union[str, Any] = None
return state
def __setstate__( self , _a ) -> int:
_a : str = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a : Union[str, Any] = {}
_a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowercase ( self , _a ) -> List[str]:
return self.sp_model.encode(_a , out_type=_a )
def __lowercase ( self , _a ) -> Optional[Any]:
return self.sp_model.piece_to_id(_a )
def __lowercase ( self , _a ) -> Optional[Any]:
_a : Union[str, Any] = self.sp_model.IdToPiece(_a )
return token
def __lowercase ( self , _a ) -> Union[str, Any]:
_a : Any = []
_a : List[Any] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_a ) + token
_a : List[Any] = []
else:
current_sub_tokens.append(_a )
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __lowercase ( self , _a , _a = None ) -> Tuple[str]:
if not os.path.isdir(_a ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_a : Tuple = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
_a : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 701 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    """A stored key/value pair."""
    key: KEY
    val: VAL
class _DeletedItem(_Item):
    """Tombstone marking a deleted slot; falsy so probing can reuse it, but not None so lookups keep scanning."""
    def __init__(self) -> None:
        super().__init__(None, None)
    def __bool__(self) -> bool:
        return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing and linear probing."""
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)
    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)
    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to place (key, val) at bucket ``ind``; return False if another key occupies it."""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False
    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)
    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit
    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)
    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)
    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)
    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break
    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)
    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)
    def __len__(self) -> int:
        return self._len
    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)
    def __repr__(self) -> str:
        val_string = " ,".join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item)
        return f"""HashMap({val_string})"""
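# Usage sketch (illustrative): the class behaves like a dict backed by open addressing.
#   hm = HashMap()
#   hm["apple"] = 1
#   hm["banana"] = 2
#   del hm["apple"]
#   assert "banana" in hm and hm["banana"] == 2 and len(hm) == 1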
| 578 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of ``nums`` whose elements sum to ``max_sum``."""
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Depth-first backtracking: extend ``path`` only while it can still reach ``max_sum``."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index],
        )
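# Worked example (matches the demo below): nums = [3, 34, 4, 12, 5, 2] with
# max_sum = 9 yields the subsets [3, 4, 2] and [4, 5].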
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 670 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ ):
super().__init__()
_snake_case : List[str] = nn.ModuleList(lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = True , ):
for i, (image, scale, controlnet) in enumerate(zip(lowercase_ , lowercase_ , self.nets ) ):
_snake_case ,_snake_case : Optional[int] = controlnet(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
# merge samples
if i == 0:
_snake_case ,_snake_case : Tuple = down_samples, mid_sample
else:
_snake_case : Tuple = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowercase_ , lowercase_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCamelCase ( self , lowercase_ , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = None , ):
_snake_case : Tuple = 0
_snake_case : Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowercase_ , is_main_process=lowercase_ , save_function=lowercase_ , safe_serialization=lowercase_ , variant=lowercase_ , )
idx += 1
_snake_case : int = model_path_to_save + f"""_{idx}"""
@classmethod
def UpperCamelCase ( cls , lowercase_ , **lowercase_ ):
_snake_case : List[str] = 0
_snake_case : Optional[Any] = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
_snake_case : Optional[Any] = pretrained_model_path
while os.path.isdir(lowercase_ ):
_snake_case : int = ControlNetModel.from_pretrained(lowercase_ , **lowercase_ )
controlnets.append(lowercase_ )
idx += 1
_snake_case : str = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(lowercase_ )} controlnets loaded from {pretrained_model_path}.""" )
if len(lowercase_ ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(lowercase_ )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(lowercase_ )
| 670 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self ) -> Optional[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A__ ( self ) -> List[str]:
__lowerCAmelCase = ort.SessionOptions()
__lowerCAmelCase = False
return options
def A__ ( self ) -> Optional[Any]:
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
__lowerCAmelCase = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=snake_case_ , feature_extractor=snake_case_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = """A red cat sitting on a park bench"""
__lowerCAmelCase = np.random.RandomState(0 )
__lowerCAmelCase = pipe(
prompt=snake_case_ , image=snake_case_ , mask_image=snake_case_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=snake_case_ , output_type="""np""" , )
__lowerCAmelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 712 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase_ ( A__ ):
'''simple docstring'''
def A__ ( self , snake_case_ ) -> Optional[int]:
with open(snake_case_ , encoding="""utf-8""" ) as input_file:
__lowerCAmelCase = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
__lowerCAmelCase = input_file.read()
__lowerCAmelCase = regexp.search(snake_case_ )
return match
def A__ ( self , snake_case_ ) -> Union[str, Any]:
with open(snake_case_ , encoding="""utf-8""" ) as input_file:
__lowerCAmelCase = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
__lowerCAmelCase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__lowerCAmelCase = regexp.finditer(snake_case_ )
__lowerCAmelCase = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = Path("""./datasets""" )
__lowerCAmelCase = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(snake_case_ ) ):
raise AssertionError(f"""open(...) must use utf-8 encoding in {dataset}""" )
def A__ ( self ) -> Tuple:
__lowerCAmelCase = Path("""./datasets""" )
__lowerCAmelCase = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(snake_case_ ) ):
raise AssertionError(f"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 573 | 0 |
'''simple docstring'''
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters; leave the rest unchanged."""
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split())
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("""Hey wollef sroirraw"""))
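    # Expected output: "Hey fellow warriors" (both long words are reversed; "Hey" is kept).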
| 378 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase ( UpperCamelCase_ ):
def __init__( self : Union[str, Any] , a__ : int , a__ : List[Any]=768 ):
'''simple docstring'''
super().__init__(a__ )
lowerCAmelCase__ : int = proj_size
lowerCAmelCase__ : str = CLIPVisionModel(a__ )
lowerCAmelCase__ : int = PaintByExampleMapper(a__ )
lowerCAmelCase__ : Optional[int] = nn.LayerNorm(config.hidden_size )
lowerCAmelCase__ : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
lowerCAmelCase__ : Dict = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _A ( self : Tuple , a__ : int , a__ : Optional[Any]=False ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.model(pixel_values=a__ )
lowerCAmelCase__ : Any = clip_output.pooler_output
lowerCAmelCase__ : Union[str, Any] = self.mapper(latent_states[:, None] )
lowerCAmelCase__ : List[Any] = self.final_layer_norm(a__ )
lowerCAmelCase__ : Optional[int] = self.proj_out(a__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[Any] , a__ : Optional[Any] ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Optional[Any] = (config.num_hidden_layers + 1) // 5
lowerCAmelCase__ : str = config.hidden_size
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : int = nn.ModuleList(
[
BasicTransformerBlock(a__ , a__ , a__ , activation_fn="gelu" , attention_bias=a__ )
for _ in range(a__ )
] )
def _A ( self : int , a__ : Union[str, Any] ):
'''simple docstring'''
for block in self.blocks:
lowerCAmelCase__ : List[Any] = block(a__ )
return hidden_states
| 378 | 1 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class snake_case_ (__lowerCamelCase ):
"""simple docstring"""
_lowerCamelCase = """data2vec-audio"""
def __init__( self ,lowercase=32 ,lowercase=768 ,lowercase=12 ,lowercase=12 ,lowercase=3072 ,lowercase="gelu" ,lowercase=0.1 ,lowercase=0.1 ,lowercase=0.1 ,lowercase=0.0 ,lowercase=0.1 ,lowercase=0.1 ,lowercase=0.02 ,lowercase=1E-5 ,lowercase="gelu" ,lowercase=(512, 512, 512, 512, 512, 512, 512) ,lowercase=(5, 2, 2, 2, 2, 2, 2) ,lowercase=(10, 3, 3, 3, 3, 2, 2) ,lowercase=False ,lowercase=16 ,lowercase=19 ,lowercase=5 ,lowercase=0.05 ,lowercase=10 ,lowercase=2 ,lowercase=0.0 ,lowercase=10 ,lowercase=0 ,lowercase="sum" ,lowercase=False ,lowercase=False ,lowercase=256 ,lowercase=(512, 512, 512, 512, 1500) ,lowercase=(5, 3, 3, 1, 1) ,lowercase=(1, 2, 3, 1, 1) ,lowercase=512 ,lowercase=0 ,lowercase=1 ,lowercase=2 ,lowercase=False ,lowercase=3 ,lowercase=2 ,lowercase=3 ,lowercase=None ,**lowercase ,):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ ,pad_token_id=SCREAMING_SNAKE_CASE_ ,bos_token_id=SCREAMING_SNAKE_CASE_ ,eos_token_id=SCREAMING_SNAKE_CASE_)
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : List[Any] = feat_extract_activation
UpperCAmelCase_ : Union[str, Any] = list(SCREAMING_SNAKE_CASE_)
UpperCAmelCase_ : List[Any] = list(SCREAMING_SNAKE_CASE_)
UpperCAmelCase_ : List[str] = list(SCREAMING_SNAKE_CASE_)
UpperCAmelCase_ : Union[str, Any] = conv_bias
UpperCAmelCase_ : List[Any] = num_conv_pos_embeddings
UpperCAmelCase_ : Dict = num_conv_pos_embedding_groups
UpperCAmelCase_ : str = conv_pos_kernel_size
UpperCAmelCase_ : Any = len(self.conv_dim)
UpperCAmelCase_ : List[str] = num_hidden_layers
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Dict = hidden_act
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : List[str] = hidden_dropout
UpperCAmelCase_ : Union[str, Any] = attention_dropout
UpperCAmelCase_ : str = activation_dropout
UpperCAmelCase_ : Union[str, Any] = feat_proj_dropout
UpperCAmelCase_ : Optional[Any] = final_dropout
UpperCAmelCase_ : List[str] = layerdrop
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : Optional[int] = initializer_range
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : List[str] = mask_time_prob
UpperCAmelCase_ : List[str] = mask_time_length
UpperCAmelCase_ : List[Any] = mask_time_min_masks
UpperCAmelCase_ : List[Any] = mask_feature_prob
UpperCAmelCase_ : Dict = mask_feature_length
UpperCAmelCase_ : List[Any] = mask_feature_min_masks
# ctc loss
UpperCAmelCase_ : Union[str, Any] = ctc_loss_reduction
UpperCAmelCase_ : Dict = ctc_zero_infinity
# adapter
UpperCAmelCase_ : List[Any] = add_adapter
UpperCAmelCase_ : str = adapter_kernel_size
UpperCAmelCase_ : str = adapter_stride
UpperCAmelCase_ : int = num_adapter_layers
UpperCAmelCase_ : List[str] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : Dict = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : Tuple = list(SCREAMING_SNAKE_CASE_)
UpperCAmelCase_ : Union[str, Any] = list(SCREAMING_SNAKE_CASE_)
UpperCAmelCase_ : int = list(SCREAMING_SNAKE_CASE_)
UpperCAmelCase_ : Tuple = xvector_output_dim
@property
def A_ ( self):
"""simple docstring"""
return math.prod(self.conv_stride)
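        # With the default conv_stride of (5, 2, 2, 2, 2, 2, 2), math.prod gives
        # 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples.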
| 720 |
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
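# Worked example: binary_and(25, 32) zero-fills 11001 and 100000 to six bits and
# ANDs them position by position, returning "0b000000".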
if __name__ == "__main__":
import doctest
doctest.testmod()
| 455 | 0 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__lowerCamelCase : str = logging.getLogger(__name__)
class a__ ( A__ ):
A = 'token-classification'
def __init__( self : Optional[Any],_A : Optional[Any] ):
"""simple docstring"""
if type(_A ) == dict:
SCREAMING_SNAKE_CASE_ : List[str] = Namespace(**_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = import_module("tasks" )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = getattr(_A,hparams.task_type )
SCREAMING_SNAKE_CASE_ : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.token_classification_task.get_labels(hparams.labels )
SCREAMING_SNAKE_CASE_ : Optional[int] = CrossEntropyLoss().ignore_index
super().__init__(_A,len(self.labels ),self.mode )
def __UpperCamelCase ( self : Optional[Any],**_A : List[Any] ):
"""simple docstring"""
return self.model(**_A )
def __UpperCamelCase ( self : Optional[Any],_A : Optional[int],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE_ : Dict = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
SCREAMING_SNAKE_CASE_ : Tuple = self(**_A )
SCREAMING_SNAKE_CASE_ : List[Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.hparams
for mode in ["train", "dev", "test"]:
SCREAMING_SNAKE_CASE_ : int = self._feature_file(_A )
if os.path.exists(_A ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s",_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.load(_A )
else:
logger.info("Creating features from dataset file at %s",args.data_dir )
SCREAMING_SNAKE_CASE_ : Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir,_A )
SCREAMING_SNAKE_CASE_ : List[Any] = self.token_classification_task.convert_examples_to_features(
_A,self.labels,args.max_seq_length,self.tokenizer,cls_token_at_end=bool(self.config.model_type in ["xlnet"] ),cls_token=self.tokenizer.cls_token,cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,sep_token=self.tokenizer.sep_token,sep_token_extra=_A,pad_on_left=bool(self.config.model_type in ["xlnet"] ),pad_token=self.tokenizer.pad_token_id,pad_token_segment_id=self.tokenizer.pad_token_type_id,pad_token_label_id=self.pad_token_label_id,)
logger.info("Saving features into cached file %s",_A )
torch.save(_A,_A )
def __UpperCamelCase ( self : Tuple,_A : int,_A : int,_A : bool = False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self._feature_file(_A )
logger.info("Loading features from cached file %s",_A )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.load(_A )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([f.input_ids for f in features],dtype=torch.long )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([f.attention_mask for f in features],dtype=torch.long )
if features[0].token_type_ids is not None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([f.token_type_ids for f in features],dtype=torch.long )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor([0 for f in features],dtype=torch.long )
# HACK(we will not use this anymore soon)
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([f.label_ids for f in features],dtype=torch.long )
return DataLoader(
TensorDataset(_A,_A,_A,_A ),batch_size=_A )
def __UpperCamelCase ( self : List[Any],_A : Optional[int],_A : Optional[Any] ):
"""simple docstring"""
"""Compute validation""" ""
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE_ : Tuple = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
SCREAMING_SNAKE_CASE_ : Tuple = self(**_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = outputs[:2]
SCREAMING_SNAKE_CASE_ : str = logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCamelCase ( self : List[str],_A : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.stack([x["val_loss"] for x in outputs] ).mean()
SCREAMING_SNAKE_CASE_ : Dict = np.concatenate([x["pred"] for x in outputs],axis=0 )
SCREAMING_SNAKE_CASE_ : str = np.argmax(_A,axis=2 )
SCREAMING_SNAKE_CASE_ : List[str] = np.concatenate([x["target"] for x in outputs],axis=0 )
SCREAMING_SNAKE_CASE_ : Dict = dict(enumerate(self.labels ) )
SCREAMING_SNAKE_CASE_ : Any = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
SCREAMING_SNAKE_CASE_ : List[str] = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(_A,_A ),
"precision": precision_score(_A,_A ),
"recall": recall_score(_A,_A ),
"f1": fa_score(_A,_A ),
}
SCREAMING_SNAKE_CASE_ : Dict = dict(results.items() )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = results
return ret, preds_list, out_label_list
def __UpperCamelCase ( self : Dict,_A : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._eval_end(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCamelCase ( self : List[str],_A : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self._eval_end(_A )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
SCREAMING_SNAKE_CASE_ : Dict = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCamelCase ( _A : int,_A : Tuple ):
"""simple docstring"""
BaseTransformer.add_model_specific_args(_A,_A )
parser.add_argument(
"--task_type",default="NER",type=_A,help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length",default=128,type=_A,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
),)
parser.add_argument(
"--labels",default="",type=_A,help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",)
parser.add_argument(
"--gpus",default=0,type=_A,help="The number of GPUs allocated for this, it is by default 0 meaning none",)
parser.add_argument(
"--overwrite_cache",action="store_true",help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__lowerCamelCase : Dict = NERTransformer.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase : Any = parser.parse_args()
__lowerCamelCase : Union[str, Any] = NERTransformer(args)
__lowerCamelCase : Optional[int] = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__lowerCamelCase : Any = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
__lowerCamelCase : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 216 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__ ( A__ ):
A = ['image_processor', 'tokenizer']
A = 'ViTImageProcessor'
A = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[str],_A : Optional[Any]=None,_A : List[str]=None,**_A : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",_A,)
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_A,_A )
def __call__( self : Optional[Any],_A : Any=None,_A : Tuple=None,_A : Dict=None,_A : Optional[Any]=None,**_A : int ):
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(_A,return_tensors=_A,**_A )
if visual_prompt is not None:
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor(_A,return_tensors=_A,**_A )
if images is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor(_A,return_tensors=_A,**_A )
if visual_prompt is not None and images is not None:
SCREAMING_SNAKE_CASE_ : List[str] = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
SCREAMING_SNAKE_CASE_ : int = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
SCREAMING_SNAKE_CASE_ : str = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_A ),tensor_type=_A )
def __UpperCamelCase ( self : int,*_A : Optional[Any],**_A : Dict ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_A,**_A )
def __UpperCamelCase ( self : Tuple,*_A : Dict,**_A : Optional[int] ):
"""simple docstring"""
return self.tokenizer.decode(*_A,**_A )
@property
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",_A,)
return self.image_processor_class
@property
def __UpperCamelCase ( self : int ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",_A,)
return self.image_processor
| 216 | 1 |
"""simple docstring"""
def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b
def mod_inverse(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of ``a`` modulo ``m`` via the extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
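# Worked example: mod_inverse(7, 26) == 15, since 7 * 15 == 105 == 4 * 26 + 1.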
| 714 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
_a : Optional[int]= logging.get_logger(__name__)
_a : Tuple= {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Dict = """layoutlmv3"""
def __init__(self : List[str] , _A : Optional[int]=5_02_65 , _A : List[str]=7_68 , _A : List[Any]=12 , _A : List[str]=12 , _A : Optional[int]=30_72 , _A : str="gelu" , _A : int=0.1 , _A : Tuple=0.1 , _A : List[Any]=5_12 , _A : List[str]=2 , _A : List[Any]=0.02 , _A : Tuple=1E-5 , _A : Dict=1 , _A : str=0 , _A : str=2 , _A : List[str]=10_24 , _A : Optional[Any]=1_28 , _A : Union[str, Any]=1_28 , _A : Union[str, Any]=True , _A : Union[str, Any]=32 , _A : Any=1_28 , _A : Optional[Any]=64 , _A : List[Any]=2_56 , _A : str=True , _A : List[Any]=True , _A : Tuple=True , _A : Tuple=2_24 , _A : Tuple=3 , _A : Optional[Any]=16 , _A : Tuple=None , **_A : Optional[int] , ) -> Optional[int]:
super().__init__(
vocab_size=_A , hidden_size=_A , num_hidden_layers=_A , num_attention_heads=_A , intermediate_size=_A , hidden_act=_A , hidden_dropout_prob=_A , attention_probs_dropout_prob=_A , max_position_embeddings=_A , type_vocab_size=_A , initializer_range=_A , layer_norm_eps=_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A , )
__snake_case : List[Any] = max_ad_position_embeddings
__snake_case : List[str] = coordinate_size
__snake_case : int = shape_size
__snake_case : List[str] = has_relative_attention_bias
__snake_case : Union[str, Any] = rel_pos_bins
__snake_case : Tuple = max_rel_pos
__snake_case : Optional[Any] = has_spatial_attention_bias
__snake_case : Union[str, Any] = rel_ad_pos_bins
__snake_case : Any = max_rel_ad_pos
__snake_case : Tuple = text_embed
__snake_case : str = visual_embed
__snake_case : List[str] = input_size
__snake_case : List[Any] = num_channels
__snake_case : Union[str, Any] = patch_size
__snake_case : str = classifier_dropout
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Optional[int] = version.parse("""1.12""" )
@property
def _lowercase (self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
])
@property
def _lowercase (self : Tuple) -> float:
return 1E-5
@property
def _lowercase (self : Optional[int]) -> int:
return 12
def _lowercase (self : str , _A : "ProcessorMixin" , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional["TensorType"] = None , _A : int = 3 , _A : int = 40 , _A : int = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , _A)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__snake_case : Union[str, Any] = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case : Any = processor.tokenizer.num_special_tokens_to_add(_A)
__snake_case : Optional[int] = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A)
# Generate dummy inputs according to compute batch and sequence
__snake_case : Optional[int] = [[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
__snake_case : Dict = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__snake_case : Optional[Any] = self._generate_dummy_images(_A , _A , _A , _A)
__snake_case : Dict = dict(
processor(
_A , text=_A , boxes=_A , return_tensors=_A , ))
return inputs
| 192 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = (DDIMParallelScheduler,)
lowerCamelCase_ = (("eta", 0.0), ("num_inference_steps", 50))
def _snake_case ( self :List[Any] , **__A :List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**__A )
return config
def _snake_case ( self :int , **__A :List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(**__A )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 10, 0.0
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
scheduler.set_timesteps(__A )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ = model(__A , __A )
SCREAMING_SNAKE_CASE__ = scheduler.step(__A , __A , __A , __A ).prev_sample
return sample
def _snake_case ( self :Tuple ) -> Optional[int]:
"""simple docstring"""
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def _snake_case ( self :Union[str, Any] ) -> Tuple:
"""simple docstring"""
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__A )
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE__ = scheduler_class(**__A )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def _snake_case ( self :List[str] ) -> Any:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__A , beta_end=__A )
def _snake_case ( self :Dict ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__A )
def _snake_case ( self :int ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def _snake_case ( self :Optional[Any] ) -> List[Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__A )
def _snake_case ( self :List[Any] ) -> Tuple:
"""simple docstring"""
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__A )
def _snake_case ( self :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.check_over_configs(thresholding=__A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__A , prediction_type=__A , sample_max_value=__A , )
def _snake_case ( self :Optional[int] ) -> List[Any]:
"""simple docstring"""
for t in [1, 10, 49]:
self.check_over_forward(time_step=__A )
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=__A , num_inference_steps=__A )
def _snake_case ( self :Tuple ) -> str:
"""simple docstring"""
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__A , eta=__A )
def _snake_case ( self :List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4_7_7_1 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2_4_6_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.0_2 ) ) < 1E-5
def _snake_case ( self :int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ = scheduler_class(**__A )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 10, 0.0
scheduler.set_timesteps(__A )
SCREAMING_SNAKE_CASE__ = self.dummy_model()
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE__ = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE__ = samplea.shape[0]
SCREAMING_SNAKE_CASE__ = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE__ = torch.arange(__A )[0:3, None].repeat(1 , __A )
SCREAMING_SNAKE_CASE__ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE__ = scheduler.batch_step_no_noise(__A , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __A )
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1E-2
assert abs(result_mean.item() - 0.4_9_8_2 ) < 1E-3
def _snake_case ( self :Tuple ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.full_loop()
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1E-3
def _snake_case ( self :Optional[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.full_loop(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 5_2.5_3_0_2 ) < 1E-2
assert abs(result_mean.item() - 0.0_6_8_4 ) < 1E-3
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.full_loop(set_alpha_to_one=__A , beta_start=0.0_1 )
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_5_1 ) < 1E-3
def _snake_case ( self :Tuple ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.full_loop(set_alpha_to_one=__A , beta_start=0.0_1 )
SCREAMING_SNAKE_CASE__ = torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE__ = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1E-2
        assert abs(result_mean.item() - 0.1_9_4_1 ) < 1E-3
| 6 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Union[str, Any] , __A :Optional[int] , __A :Tuple=13 , __A :Dict=7 , __A :Dict=True , __A :str=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Optional[Any]=True , __A :Any=False , __A :Dict=False , __A :Any=False , __A :Tuple=2 , __A :Dict=99 , __A :Optional[Any]=0 , __A :List[str]=32 , __A :Optional[int]=5 , __A :Dict=4 , __A :List[str]=0.1 , __A :Union[str, Any]=0.1 , __A :Tuple=512 , __A :Any=12 , __A :Optional[int]=2 , __A :Union[str, Any]=0.0_2 , __A :Dict=3 , __A :Optional[int]=4 , __A :Any="last" , __A :List[Any]=None , __A :Any=None , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_input_lengths
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = gelu_activation
SCREAMING_SNAKE_CASE__ = sinusoidal_embeddings
SCREAMING_SNAKE_CASE__ = causal
SCREAMING_SNAKE_CASE__ = asm
SCREAMING_SNAKE_CASE__ = n_langs
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = n_special
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = summary_type
SCREAMING_SNAKE_CASE__ = use_proj
SCREAMING_SNAKE_CASE__ = scope
def _snake_case ( self :Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE__ = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , 2 ).float()
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self :List[str] ) -> Optional[int]:
"""simple docstring"""
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def create_and_check_flaubert_model( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> List[Any]:
    """simple docstring"""
    model = FlaubertModel(config=config )
    model.to(torch_device )  # torch_device comes from transformers.testing_utils
    model.eval()
    result = model(input_ids , lengths=input_lengths , langs=token_type_ids )
    result = model(input_ids , langs=token_type_ids )
    result = model(input_ids )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_flaubert_lm_head( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Dict:
    """simple docstring"""
    model = FlaubertWithLMHeadModel(config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids , token_type_ids=token_type_ids , labels=token_labels )
    self.parent.assertEqual(result.loss.shape , () )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def create_and_check_flaubert_simple_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> int:
    """simple docstring"""
    model = FlaubertForQuestionAnsweringSimple(config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids )
    result = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
    self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
    self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def create_and_check_flaubert_qa( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Tuple:
    """simple docstring"""
    model = FlaubertForQuestionAnswering(config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids )
    result_with_labels = model(
        input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , p_mask=input_mask , )
    result_with_labels = model(
        input_ids , start_positions=sequence_labels , end_positions=sequence_labels , cls_index=sequence_labels , is_impossible=is_impossible_labels , )
    ((total_loss,) , ) = result_with_labels.to_tuple()
    result_with_labels = model(input_ids , start_positions=sequence_labels , end_positions=sequence_labels )
    ((total_loss,) , ) = result_with_labels.to_tuple()
    self.parent.assertEqual(result_with_labels.loss.shape , () )
    self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
    self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
    self.parent.assertEqual(
        result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
    self.parent.assertEqual(
        result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
    self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def create_and_check_flaubert_sequence_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[Any]:
    """simple docstring"""
    model = FlaubertForSequenceClassification(config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids )
    result = model(input_ids , labels=sequence_labels )
    self.parent.assertEqual(result.loss.shape , () )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def create_and_check_flaubert_token_classif( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Optional[int]:
    """simple docstring"""
    config.num_labels = self.num_labels
    model = FlaubertForTokenClassification(config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids , attention_mask=input_mask , labels=token_labels )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def create_and_check_flaubert_multiple_choice( self , config , input_ids , token_type_ids , input_lengths , sequence_labels , token_labels , is_impossible_labels , choice_labels , input_mask , ) -> Tuple:
    """simple docstring"""
    config.num_choices = self.num_choices
    model = FlaubertForMultipleChoice(config=config )
    model.to(torch_device )
    model.eval()
    multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    result = model(
        multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def prepare_config_and_inputs_for_common( self :Union[str, Any] ) -> Any:
    """simple docstring"""
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ) = config_and_inputs
    inputs_dict = {
        """input_ids""": input_ids,
        """token_type_ids""": token_type_ids,
        """lengths""": input_lengths,
        """attention_mask""": input_mask,
    }
return config, inputs_dict
@require_torch
class FlaubertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):  # mixin names assume the usual transformers test imports above
all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def is_pipeline_test_to_skip( self :Any , pipeline_test_casse_name :Optional[int] , config_class :Optional[int] , model_architecture :Dict , tokenizer_name :List[Any] , processor_name :Tuple ) -> bool:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _prepare_for_class( self :Tuple , inputs_dict :List[str] , model_class :Optional[int] , return_labels :bool=False ) -> List[Any]:
    """simple docstring"""
    inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
    if return_labels:
        if model_class.__name__ == "FlaubertForQuestionAnswering":
            inputs_dict["""start_positions"""] = torch.zeros(
                self.model_tester.batch_size , dtype=torch.long , device=torch_device )
            inputs_dict["""end_positions"""] = torch.zeros(
                self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def setUp( self :str ) -> None:
    """simple docstring"""
    self.model_tester = FlaubertModelTester(self )
    self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )
def test_config( self :int ) -> None:
    """simple docstring"""
    self.config_tester.run_common_tests()
def test_flaubert_model( self :Optional[Any] ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_flaubert_model(*config_and_inputs )
def test_flaubert_lm_head( self :Tuple ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )
def test_flaubert_simple_qa( self :str ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs )
def test_flaubert_qa( self :Union[str, Any] ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )
def test_flaubert_sequence_classif( self :str ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )
def test_flaubert_token_classif( self :Any ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs )
def test_flaubert_multiple_choice( self :Any ) -> None:
    """simple docstring"""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs )
@slow
def test_model_from_pretrained( self :Union[str, Any] ) -> None:
    """simple docstring"""
    for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = FlaubertModel.from_pretrained(model_name )
        self.assertIsNotNone(model )
@slow
@require_torch_gpu
def test_torchscript_device_change( self :Tuple ) -> None:
    """simple docstring"""
    config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
        if model_class == FlaubertForMultipleChoice:
            return
        config.torchscript = True
        model = model_class(config=config )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        traced_model = torch.jit.trace(
            model , (inputs["""input_ids"""].to("""cpu""" ), inputs["""attention_mask"""].to("""cpu""" )) )
        with tempfile.TemporaryDirectory() as tmp:
            torch.jit.save(traced_model , os.path.join(tmp , """traced_model.pt""" ) )
            loaded = torch.jit.load(os.path.join(tmp , """traced_model.pt""" ) , map_location=torch_device )
            loaded(inputs["""input_ids"""].to(torch_device ) , inputs["""attention_mask"""].to(torch_device ) )
@require_torch
class FlaubertModelIntegrationTest( unittest.TestCase ):
@slow
def test_inference_no_head_absolute_embedding( self :Dict ) -> None:
    """simple docstring"""
    model = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
    input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
    with torch.no_grad():
        output = model(input_ids )[0]
    expected_shape = torch.Size((1, 11, 768) )
    self.assertEqual(output.shape , expected_shape )
    expected_slice = torch.tensor(
        [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
    self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) ) | 6 | 1 |
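# The tester above builds its random inputs with helpers like `ids_tensor` and
# `random_attention_mask`. A minimal standalone sketch of what such helpers
# typically do (an approximation, not the test library's actual implementation):
import torch

def ids_tensor(shape, vocab_size):
    # Uniformly sample token ids in [0, vocab_size).
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask(shape):
    # Random 0/1 mask; force the first column to 1 so no row is fully masked.
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, 0] = 1
    return mask

input_ids = ids_tensor([13, 7], vocab_size=99)
attention_mask = random_attention_mask([13, 7])
assert input_ids.shape == (13, 7) and bool(attention_mask[:, 0].all())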
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric("mean_iou")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
'''
_CITATION = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}'''
def intersect_and_union( pred_label , label , num_labels , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label )
    label = np.array(label )
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = np.not_equal(label , ignore_index )
    pred_label = pred_label[mask]
    label = np.array(label )[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label = np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label = np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union( results , gt_seg_maps , num_labels : int , ignore_index : bool , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    total_area_intersect = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label = np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label = np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label = intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels : Optional[int] , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ):
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label = total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou )
    metrics["mean_accuracy"] = np.nanmean(acc )
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def _info( self : Optional[int] ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def _compute( self : Any , predictions : str , references : int , num_labels : int , ignore_index : bool , nan_to_num : Optional[int] = None , label_map : Optional[Dict[int, int]] = None , reduce_labels : bool = False , ) -> Dict:
    iou_result = mean_iou(
        results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
    return iou_result
| 702 |
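# A tiny worked example of the intersect/union computation above, done by hand
# with numpy (toy flattened maps, two classes, no ignore_index):
import numpy as np

pred = np.array([0, 1, 1, 1])
label = np.array([0, 1, 0, 1])
num_labels = 2
intersect = pred[pred == label]
area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]
area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
iou = area_intersect / (area_pred + area_label - area_intersect)
print(iou)              # [0.5        0.66666667]
print(np.nanmean(iou))  # mean IoU ~= 0.583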
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester( unittest.TestCase ):
def __init__( self : Any , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ) -> None:
    size = size if size is not None else {'shortest_edge': 18}
    crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_normalize = do_normalize
    self.image_mean = image_mean
    self.image_std = image_std
def prepare_image_processor_dict( self : int ) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class LevitImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = LevitImageProcessor if is_vision_available() else None
def setUp( self : Optional[Any] ) -> None:
    self.image_processor_tester = LevitImageProcessingTester(self )
@property
def image_processor_dict( self : Any ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties( self : List[Any] ) -> None:
    image_processor = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(image_processor , 'image_mean' ) )
    self.assertTrue(hasattr(image_processor , 'image_std' ) )
    self.assertTrue(hasattr(image_processor , 'do_normalize' ) )
    self.assertTrue(hasattr(image_processor , 'do_resize' ) )
    self.assertTrue(hasattr(image_processor , 'do_center_crop' ) )
    self.assertTrue(hasattr(image_processor , 'size' ) )
def test_image_processor_from_dict_with_kwargs( self : Union[str, Any] ) -> None:
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size , {'shortest_edge': 18} )
    self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
    self.assertEqual(image_processor.size , {'shortest_edge': 42} )
    self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCamelCase ( self : Optional[Any] ) -> str:
pass
def test_call_pil( self : int ) -> None:
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
    for image in image_inputs:
        self.assertIsInstance(image , Image.Image )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def test_call_numpy( self : List[str] ) -> None:
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
    for image in image_inputs:
        self.assertIsInstance(image , np.ndarray )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def test_call_pytorch( self : Union[str, Any] ) -> None:
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
    for image in image_inputs:
        self.assertIsInstance(image , torch.Tensor )
    # Test not batched input
    encoded_images = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
encoded_images = image_processing(image_inputs , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 137 | 0 |
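# `prepare_image_inputs` above comes from the shared test utilities; a rough
# standalone equivalent (the exact upstream behavior may differ) that produces
# random PIL images within the tester's min/max resolution:
import numpy as np
from PIL import Image

def make_random_pil_images(batch_size=7, num_channels=3, min_res=30, max_res=400, equal_resolution=False):
    images = []
    for _ in range(batch_size):
        h, w = ((max_res, max_res) if equal_resolution
                else np.random.randint(min_res, max_res, size=2))
        arr = np.random.randint(0, 256, (int(h), int(w), num_channels), dtype=np.uint8)
        images.append(Image.fromarray(arr))
    return images

assert all(im.mode == "RGB" for im in make_random_pil_images(batch_size=2))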
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ : int = logging.get_logger(__name__)
def get_mobilenet_va_config( model_name : Tuple ):
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported." )
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_movilevit_checkpoint( model_name : Union[str, Any] , checkpoint_path : Optional[int] , pytorch_dump_folder_path : Dict , push_to_hub : Union[str, Any]=False ):
    config = get_mobilenet_va_config(model_name )
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model , config , checkpoint_path )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size} , size={"shortest_edge": config.image_size + 32} , )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205] )
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333] )
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , expected_logits , atol=1E-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print("Pushing to the hub..." )
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
a_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""mobilenet_v1_1.0_224""",
type=str,
help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a_ : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 675 |
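# The model-name regex above encodes both the depth multiplier and the input
# resolution; a quick check of how a name is parsed:
import re

matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", "mobilenet_v1_0.75_192")
print(float(matches[1]), int(matches[2]))  # 0.75 192 -> depth_multiplier, image_size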
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a_ : int = """docs/source/en/_toctree.yml"""
def clean_doc_toc( doc_list : Optional[int] ):
    counts = defaultdict(int )
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]} )
        else:
            new_doc_list.append(doc )
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]
    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'''{duplicate_key} is present several times in the documentation table of content at '''
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add non-duplicate keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
    new_doc = sorted(new_doc , key=lambda s : s["title"].lower() )
    # "overview" gets special treatment and is always first
    if len(overview_doc ) > 1:
        raise ValueError(F"{overview_doc} has two 'overview' docs which is not allowed." )
    overview_doc.extend(new_doc )
    # Sort
    return overview_doc
def check_scheduler_doc( overwrite : bool=False ):
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc )
    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this." )
def check_pipeline_doc( overwrite : bool=False ):
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )
    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]
    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )
    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )
    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs
    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 675 | 1 |
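# A toy run of `clean_doc_toc` on a hand-made doc list: duplicates are merged,
# entries are sorted by title, and the "overview" entry is kept first.
docs = [
    {"local": "overview", "title": "Overview"},
    {"local": "zeta", "title": "Zeta"},
    {"local": "alpha", "title": "Alpha"},
    {"local": "alpha", "title": "Alpha"},
]
print(clean_doc_toc(docs))
# [{'local': 'overview', 'title': 'Overview'},
#  {'local': 'alpha', 'title': 'Alpha'},
#  {'local': 'zeta', 'title': 'Zeta'}]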
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
    '''simple docstring'''
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
    '''simple docstring'''
    order = 2
@register_to_config
def __init__( self : List[str] , sigma_min : float = 0.02 , sigma_max : float = 100 , s_noise : float = 1.007 , s_churn : float = 80 , s_min : float = 0.05 , s_max : float = 50 , ):
    """simple docstring"""
    self.init_noise_sigma = sigma_max
    # setable values
    self.num_inference_steps = None
    self.timesteps = None
    self.schedule = None  # sigma(t_i)
def scale_model_input( self : Optional[int] , sample : Tuple , timestep : Dict = None ):
"""simple docstring"""
return sample
def set_timesteps( self : Any , num_inference_steps : int , device : List[Any] = None ):
    """simple docstring"""
    self.num_inference_steps = num_inference_steps
    timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
    self.timesteps = torch.from_numpy(timesteps ).to(device )
    schedule = [
        (
            self.config.sigma_max**2
            * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
        )
        for i in self.timesteps
    ]
    self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )
def add_noise_to_input( self : Optional[int] , sample : Union[str, Any] , sigma : Tuple , generator : Tuple = None ):
    """simple docstring"""
    if self.config.s_min <= sigma <= self.config.s_max:
        gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
    else:
        gamma = 0
    # sample eps ~ N(0, S_noise^2 * I)
    eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
    sigma_hat = sigma + gamma * sigma
    sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
    return sample_hat, sigma_hat
def step( self : int , model_output : List[Any] , sigma_hat : float , sigma_prev : float , sample_hat : List[Any] , return_dict : bool = True , ):
    """simple docstring"""
    pred_original_sample = sample_hat + sigma_hat * model_output
    derivative = (sample_hat - pred_original_sample) / sigma_hat
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
    if not return_dict:
        return (sample_prev, derivative)
    return KarrasVeOutput(
        prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
def step_correct( self : Dict , model_output : List[str] , sigma_hat : float , sigma_prev : float , sample_hat : Optional[int] , sample_prev : Optional[int] , derivative : Optional[int] , return_dict : bool = True , ):
    """simple docstring"""
    pred_original_sample = sample_prev + sigma_prev * model_output
    derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
    if not return_dict:
        return (sample_prev, derivative)
    return KarrasVeOutput(
        prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
def add_noise( self : Optional[Any] , original_samples : Optional[Any] , noise : Any , timesteps : List[str] ):
    """simple docstring"""
    raise NotImplementedError()
| 719 |
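# A rough sketch of the sampling loop these methods are designed for (cf. the
# stochastic sampler of Karras et al., 2022). The dummy `unet` and tensor
# shapes are placeholder assumptions, not part of the scheduler itself.
import torch

scheduler = KarrasVeScheduler()
scheduler.set_timesteps(50)
unet = lambda x, sigma: torch.zeros_like(x)  # stand-in for a real denoising model
sample = torch.randn(1, 3, 64, 64) * scheduler.config.sigma_max
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # 1. stochastically increase noise from sigma to sigma_hat
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    # 2. Euler step from sigma_hat to sigma_prev
    step = scheduler.step(unet(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
    # 3. optional second-order correction
    if sigma_prev != 0:
        step = scheduler.step_correct(unet(step.prev_sample, sigma_prev), sigma_hat,
                                      sigma_prev, sample_hat, step.prev_sample, step.derivative)
    sample = step.prev_sample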
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder( nn.Module ):
'''simple docstring'''
def __init__( self : str , args : Dict ):
    """simple docstring"""
    super().__init__()
    model = torchvision.models.resnet152(pretrained=True )
    modules = list(model.children() )[:-2]
    self.model = nn.Sequential(*modules )
    self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def forward( self : int , x : List[str] ):
    """simple docstring"""
    out = self.pool(self.model(x ) )
    out = torch.flatten(out , start_dim=2 )
    out = out.transpose(1 , 2 ).contiguous()
    return out  # BxNx2048
class JsonlDataset( Dataset ):
'''simple docstring'''
def __init__( self : Any , data_path : Any , tokenizer : str , transforms : Optional[int] , labels : Dict , max_seq_length : List[str] ):
    """simple docstring"""
    self.data = [json.loads(l ) for l in open(data_path )]
    self.data_dir = os.path.dirname(data_path )
    self.tokenizer = tokenizer
    self.labels = labels
    self.n_classes = len(labels )
    self.max_seq_length = max_seq_length
    self.transforms = transforms
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(self.data )
def __getitem__( self : str , index : Optional[int] ):
    """simple docstring"""
    sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=True ) )
    start_token , sentence , end_token = sentence[0], sentence[1:-1], sentence[-1]
    sentence = sentence[: self.max_seq_length]
    label = torch.zeros(self.n_classes )
    label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
    image = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
    image = self.transforms(image )
    return {
        "image_start_token": start_token,
        "image_end_token": end_token,
        "sentence": sentence,
        "image": image,
        "label": label,
    }
def get_label_frequencies( self : Tuple ):
    """simple docstring"""
    label_freqs = Counter()
    for row in self.data:
        label_freqs.update(row["label"] )
    return label_freqs
def collate_fn( batch ):
    '''simple docstring'''
    lens = [len(row["sentence"] ) for row in batch]
    bsz , max_seq_len = len(batch ), max(lens )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lens ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17] , std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69] , ),
] )
| 139 | 0 |
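# What the collate function above produces, on toy data: sequences are
# zero-padded to the batch max length with a matching 0/1 attention mask.
import torch

rows = [torch.tensor([1, 2, 3]), torch.tensor([4, 5, 6, 7, 8])]
lens = [len(r) for r in rows]
bsz, max_len = len(rows), max(lens)
text = torch.zeros(bsz, max_len, dtype=torch.long)
mask = torch.zeros(bsz, max_len, dtype=torch.long)
for i, (row, n) in enumerate(zip(rows, lens)):
    text[i, :n] = row
    mask[i, :n] = 1
print(text)  # [[1, 2, 3, 0, 0], [4, 5, 6, 7, 8]]
print(mask)  # [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]]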
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
_UpperCAmelCase : Tuple = {"mgp-str": 27}
class MgpstrTokenizer( PreTrainedTokenizer ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , vocab_file : Optional[int] , unk_token : List[Any]="[GO]" , bos_token : List[Any]="[GO]" , eos_token : Optional[Any]="[s]" , pad_token : Any="[GO]" , **kwargs : Dict ):
    super().__init__(
        unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs , )
    with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
        self.vocab = json.load(vocab_handle )
    self.decoder = {v: k for k, v in self.vocab.items()}
@property
def vocab_size( self : List[Any] ):
return len(self.vocab )
def get_vocab( self : Optional[int] ):
return dict(self.vocab , **self.added_tokens_encoder )
def _tokenize( self : int , text : str ):
    char_tokens = []
    for s in text:
        char_tokens.extend(s )
    return char_tokens
def _convert_token_to_id( self : Any , token : str ):
    return self.vocab.get(token , self.vocab.get(self.unk_token ) )
def _convert_id_to_token( self : int , index : Optional[int] ):
    return self.decoder.get(index )
def save_vocabulary( self : List[str] , save_directory : str , filename_prefix : Optional[str] = None ):
    if not os.path.isdir(save_directory ):
        logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory ) )
        return
    vocab_file = os.path.join(
        save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
    with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
        f.write(json.dumps(self.vocab , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
    return (vocab_file,)
| 668 |
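# The tokenizer above is character-level: _tokenize splits text into single
# characters and unknown characters fall back to the unk token. A standalone
# sketch with a made-up vocabulary (the real vocab.json contents differ):
vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3}
unk_token = "[GO]"
text = "abca"
tokens = [ch for ch in text]
ids = [vocab.get(ch, vocab[unk_token]) for ch in tokens]
print(ids)  # [2, 3, 0, 2] -- 'c' is unknown and maps to [GO]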
def binary_insertion_sort(collection : list ) -> list:
    '''simple docstring'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        # binary-search the insertion position of val in collection[0:i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift elements right and insert val at position low
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 668 | 1 |
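# Quick property check for the sort above: it should agree with sorted() on
# random input.
import random

data = [random.randint(-100, 100) for _ in range(50)]
assert binary_insertion_sort(list(data)) == sorted(data)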
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
lowerCamelCase_ : int = 'nllb-moe'
lowerCamelCase_ : List[str] = ['past_key_values']
lowerCamelCase_ : Tuple = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self : Optional[int] , _snake_case : Optional[Any]=12_8112 , _snake_case : List[str]=1024 , _snake_case : Union[str, Any]=12 , _snake_case : Tuple=4096 , _snake_case : Optional[int]=16 , _snake_case : Union[str, Any]=12 , _snake_case : Dict=4096 , _snake_case : Tuple=16 , _snake_case : Optional[Any]=0.05 , _snake_case : int=0.05 , _snake_case : Optional[Any]=True , _snake_case : Dict=True , _snake_case : str="relu" , _snake_case : Dict=1024 , _snake_case : str=0.1 , _snake_case : List[str]=0.1 , _snake_case : Optional[int]=0.0 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict=2 , _snake_case : Optional[Any]=True , _snake_case : Union[str, Any]=False , _snake_case : List[Any]="float32" , _snake_case : Optional[int]=False , _snake_case : Union[str, Any]=128 , _snake_case : Optional[int]=64 , _snake_case : str=4 , _snake_case : List[str]=4 , _snake_case : str=0.001 , _snake_case : Dict=0.001 , _snake_case : str="all" , _snake_case : Dict=False , _snake_case : Dict=False , _snake_case : List[Any]=1.0 , _snake_case : Optional[Any]=0.2 , _snake_case : str=1 , _snake_case : Optional[int]=0 , _snake_case : Tuple=2 , _snake_case : Tuple=False , **_snake_case : Optional[int] , ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : str = vocab_size
lowerCamelCase_ : Any = max_position_embeddings
lowerCamelCase_ : str = d_model
lowerCamelCase_ : List[Any] = encoder_ffn_dim
lowerCamelCase_ : Optional[int] = encoder_layers
lowerCamelCase_ : Union[str, Any] = encoder_attention_heads
lowerCamelCase_ : List[Any] = decoder_ffn_dim
lowerCamelCase_ : Optional[Any] = decoder_layers
lowerCamelCase_ : Optional[Any] = decoder_attention_heads
lowerCamelCase_ : List[str] = dropout
lowerCamelCase_ : int = attention_dropout
lowerCamelCase_ : int = activation_dropout
lowerCamelCase_ : List[str] = activation_function
lowerCamelCase_ : Any = init_std
lowerCamelCase_ : str = encoder_layerdrop
lowerCamelCase_ : Optional[int] = decoder_layerdrop
lowerCamelCase_ : Optional[int] = use_cache
lowerCamelCase_ : Tuple = encoder_layers
lowerCamelCase_ : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ : int = router_z_loss_coef
lowerCamelCase_ : Optional[int] = router_aux_loss_coef
lowerCamelCase_ : Union[str, Any] = decoder_sparse_step
lowerCamelCase_ : Any = encoder_sparse_step
lowerCamelCase_ : Optional[Any] = num_experts
lowerCamelCase_ : Union[str, Any] = expert_capacity
lowerCamelCase_ : Union[str, Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
lowerCamelCase_ : Optional[int] = router_dtype
lowerCamelCase_ : Optional[Any] = router_ignore_padding_tokens
lowerCamelCase_ : Union[str, Any] = batch_prioritized_routing
lowerCamelCase_ : List[str] = second_expert_policy
lowerCamelCase_ : Optional[Any] = normalize_router_prob_before_dropping
lowerCamelCase_ : List[Any] = moe_eval_capacity_token_fraction
lowerCamelCase_ : List[Any] = moe_token_dropout
lowerCamelCase_ : Tuple = output_router_logits
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
| 721 |
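# The `attribute_map` above aliases common config names onto NLLB-MoE fields.
# A small smoke test, assuming the upstream class name NllbMoeConfig and
# arbitrary reduced sizes:
from transformers import NllbMoeConfig

config = NllbMoeConfig(vocab_size=1000, d_model=64, encoder_layers=2, decoder_layers=2)
print(config.hidden_size)          # 64 -- alias for d_model
print(config.num_attention_heads)  # alias for encoder_attention_heads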
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCamelCase__ ( UpperCAmelCase ):
def UpperCAmelCase_ (self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : List[str] = SMALL_MODEL_IDENTIFIER
lowerCamelCase_ : str = 'pt'
lowerCamelCase_ : List[Any] = 'tf'
def UpperCAmelCase_ (self : List[str] , _snake_case : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ : Tuple = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_snake_case )
def UpperCAmelCase_ (self : Union[str, Any] , _snake_case : Optional[Any] ) -> int:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=_snake_case )
model_tf.save_pretrained(_snake_case )
def UpperCAmelCase_ (self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ : List[Any] = 'mock_framework'
# Framework provided - return whatever the user provides
lowerCamelCase_ : str = FeaturesManager.determine_framework(self.test_model , _snake_case )
self.assertEqual(_snake_case , _snake_case )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_snake_case )
lowerCamelCase_ : Optional[Any] = FeaturesManager.determine_framework(_snake_case , _snake_case )
self.assertEqual(_snake_case , _snake_case )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_snake_case )
lowerCamelCase_ : Optional[Any] = FeaturesManager.determine_framework(_snake_case , _snake_case )
self.assertEqual(_snake_case , _snake_case )
def UpperCAmelCase_ (self : Tuple ) -> int:
"""simple docstring"""
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_snake_case )
lowerCamelCase_ : str = FeaturesManager.determine_framework(_snake_case )
self.assertEqual(_snake_case , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_snake_case )
lowerCamelCase_ : List[str] = FeaturesManager.determine_framework(_snake_case )
self.assertEqual(_snake_case , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_snake_case ):
lowerCamelCase_ : int = FeaturesManager.determine_framework(_snake_case )
def UpperCAmelCase_ (self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = MagicMock(return_value=_snake_case )
with patch('transformers.onnx.features.is_tf_available' , _snake_case ):
lowerCamelCase_ : List[str] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_snake_case , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowerCamelCase_ : str = MagicMock(return_value=_snake_case )
with patch('transformers.onnx.features.is_torch_available' , _snake_case ):
lowerCamelCase_ : Any = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_snake_case , self.framework_tf )
# Both in environment -> use PyTorch
lowerCamelCase_ : Optional[Any] = MagicMock(return_value=_snake_case )
lowerCamelCase_ : Optional[Any] = MagicMock(return_value=_snake_case )
with patch('transformers.onnx.features.is_tf_available' , _snake_case ), patch(
'transformers.onnx.features.is_torch_available' , _snake_case ):
lowerCamelCase_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_snake_case , self.framework_pt )
# Both not in environment -> raise error
lowerCamelCase_ : Union[str, Any] = MagicMock(return_value=_snake_case )
lowerCamelCase_ : Optional[int] = MagicMock(return_value=_snake_case )
with patch('transformers.onnx.features.is_tf_available' , _snake_case ), patch(
'transformers.onnx.features.is_torch_available' , _snake_case ):
with self.assertRaises(_snake_case ):
lowerCamelCase_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model )
| 144 | 0 |
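# Typical direct use of the API exercised above: with no framework argument,
# determine_framework inspects the checkpoint and environment, preferring
# PyTorch when both backends are installed. The model id is an example.
from transformers.onnx import FeaturesManager

framework = FeaturesManager.determine_framework("hf-internal-testing/tiny-random-bert")
print(framework)  # "pt" in a torch-enabled environment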
'''simple docstring'''
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker( PreTrainedModel ):
    config_class = CLIPConfig
    _no_split_modules = ['''CLIPEncoderLayer''']
    def __init__( self , config ) -> None:
        super().__init__(config )
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config )
        self.p_head = nn.Linear(config.vision_config.projection_dim , 1 )
        self.w_head = nn.Linear(config.vision_config.projection_dim , 1 )
    @torch.no_grad()
    def forward( self , clip_input , images , p_threshold=0.5 , w_threshold=0.5 ):
        image_embeds = self.vision_model(clip_input )[0]
        nsfw_detected = self.p_head(image_embeds )
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()
        if any(nsfw_detected ):
            logger.warning(
                '''Potential NSFW content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )
        for idx, nsfw_detected_ in enumerate(nsfw_detected ):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape )
        watermark_detected = self.w_head(image_embeds )
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()
        if any(watermark_detected ):
            logger.warning(
                '''Potential watermarked content was detected in one or more images. A black image will be returned instead.'''
                ''' Try again with a different prompt and/or seed.''' )
        for idx, watermark_detected_ in enumerate(watermark_detected ):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape )
        return images, nsfw_detected, watermark_detected
| 342 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version('torch'))
def compare_versions( library_or_version : Union[str, Version] , operation : str , requirement_version : str ) ->Optional[Any]:
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )
def is_torch_version( operation : str , version : str ) ->List[str]:
    return compare_versions(torch_version , operation , version )
| 342 | 1 |
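# STR_OPERATION_TO_FUNC (imported from .constants above) presumably maps the
# comparison strings to operator functions, along these lines:
import operator
from packaging.version import parse

STR_OPERATION_TO_FUNC = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
                         "!=": operator.ne, ">=": operator.ge, ">": operator.gt}
print(STR_OPERATION_TO_FUNC[">="](parse("2.1.0"), parse("1.8.0")))  # True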
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor( ProcessorMixin ):
"""simple docstring"""
feature_extractor_class = 'Speech2TextFeatureExtractor'
tokenizer_class = 'Speech2TextTokenizer'
def __init__( self : Optional[int] , feature_extractor : Union[str, Any] , tokenizer : Any ):
    """simple docstring"""
    super().__init__(feature_extractor , tokenizer )
    self.current_processor = self.feature_extractor
    self._in_target_context_manager = False
def __call__( self : Optional[int] , *__A : Tuple , **__A : Any ):
"""simple docstring"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__A , **__A )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
_lowercase = kwargs.pop("raw_speech" )
else:
_lowercase = kwargs.pop("audio" , __A )
_lowercase = kwargs.pop("sampling_rate" , __A )
_lowercase = kwargs.pop("text" , __A )
if len(__A ) > 0:
_lowercase = args[0]
_lowercase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
_lowercase = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A )
if text is not None:
_lowercase = self.tokenizer(__A , **__A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_lowercase = encodings["input_ids"]
return inputs
def batch_decode( self : List[str] , *args : Optional[int] , **kwargs : Optional[int] ):
    """simple docstring"""
    return self.tokenizer.batch_decode(*args , **kwargs )
def decode( self : Optional[int] , *args : Optional[int] , **kwargs : Union[str, Any] ):
    """simple docstring"""
    return self.tokenizer.decode(*args , **kwargs )
@contextmanager
def as_target_processor( self : Any ):
    """simple docstring"""
    warnings.warn(
        "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
        "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
        "your audio inputs, or in a separate call." )
    self._in_target_context_manager = True
    self.current_processor = self.tokenizer
    yield
    self.current_processor = self.feature_extractor
    self._in_target_context_manager = False
| 710 |
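# Hypothetical end-to-end use of the processor above; the checkpoint name and
# the silent audio clip are placeholder assumptions.
import numpy as np
from transformers import Speech2TextProcessor

processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
audio = np.zeros(16000, dtype=np.float32)  # 1 s of "audio" at 16 kHz
inputs = processor(audio=audio, sampling_rate=16000, text="hello world", return_tensors="pt")
print(sorted(inputs.keys()))  # attention_mask, input_features, labels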
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar('''T''')
class UpperCamelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] , data : T ):
    """simple docstring"""
    self.data = data
    self.next = None
def __str__( self : Dict ):
"""simple docstring"""
return f"""{self.data}"""
class UpperCamelCase__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : str ):
"""simple docstring"""
_lowercase = None
def __iter__( self : str ):
"""simple docstring"""
_lowercase = self.top
while node:
yield node.data
_lowercase = node.next
def __str__( self : List[Any] ):
"""simple docstring"""
return "->".join([str(__A ) for item in self] )
def __len__( self : Optional[Any] ):
"""simple docstring"""
return len(tuple(iter(self ) ) )
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
return self.top is None
def snake_case ( self : Optional[int] , __A : T ):
"""simple docstring"""
_lowercase = Node(__A )
if not self.is_empty():
_lowercase = self.top
_lowercase = node
def snake_case ( self : int ):
"""simple docstring"""
if self.is_empty():
raise IndexError("pop from empty stack" )
assert isinstance(self.top , __A )
_lowercase = self.top
_lowercase = self.top.next
return pop_node.data
def snake_case ( self : Optional[int] ):
"""simple docstring"""
if self.is_empty():
raise IndexError("peek from empty stack" )
assert self.top is not None
return self.top.data
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
_lowercase = None
if __name__ == "__main__":
from doctest import testmod
testmod()
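# A quick demonstration using only the Stack API defined above:
# stack = Stack[int]()
# stack.push(1)
# stack.push(2)
# print(stack)         # 2->1
# print(stack.pop())   # 2
# print(stack.peek())  # 1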
| 602 | 0 |
def is_automorphic_number ( number: int ) -> bool:
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
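# Worked examples (an automorphic number is one whose square ends in the number itself):
# is_automorphic_number(76)  -> True   (76 ** 2 == 5776)
# is_automorphic_number(7)   -> False  (7 ** 2 == 49)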
if __name__ == "__main__":
import doctest
doctest.testmod() | 6 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    """simple docstring"""
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , vqvae , unet , scheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , image = None , batch_size = 1 , num_inference_steps = 1_00 , eta = 0.0 , generator = None , output_type = "pil" , return_dict = True , ):
        '''simple docstring'''
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}''' )
        if isinstance(image , PIL.Image.Image ):
            image = preprocess(image )
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
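# A minimal usage sketch (assumption: the "CompVis/ldm-super-resolution-4x-openimages"
# checkpoint matches this vqvae/unet/scheduler layout; not part of the original file):
# pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# upscaled = pipeline(image=low_res_pil_image, num_inference_steps=100, eta=1.0).images[0]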
| 577 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
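# Example invocation (hypothetical file paths; the flags are the ones defined above):
# python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./v1-5.ckpt --original_config_file ./v1-inference.yaml \
#     --dump_path ./sd15-diffusers --half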
| 221 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _a ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
UpperCamelCase__: str = 1
UpperCamelCase__: Dict = 3
UpperCamelCase__: Optional[int] = (32, 32)
UpperCamelCase__: Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__: Union[str, Any] = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__lowerCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__: int = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase__: Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(__lowerCamelCase )
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Any = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__: Dict = self.dummy_cond_unet_upscale
UpperCamelCase__: Union[str, Any] = DDPMScheduler()
UpperCamelCase__: Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
UpperCamelCase__: Optional[int] = self.dummy_vae
UpperCamelCase__: str = self.dummy_text_encoder
UpperCamelCase__: List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCamelCase__: Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__: Any = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCamelCase__: str = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
UpperCamelCase__: Dict = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__: Dict = "A painting of a squirrel eating a burger"
UpperCamelCase__: Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase__: Dict = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCamelCase__: List[Any] = output.images
UpperCamelCase__: Optional[Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase__: Tuple = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=__lowerCamelCase , )[0]
UpperCamelCase__: Union[str, Any] = image[0, -3:, -3:, -1]
UpperCamelCase__: Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
UpperCamelCase__: List[str] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCamelCase__: str = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self: str ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__: List[str] = self.dummy_cond_unet_upscale
UpperCamelCase__: Any = DDPMScheduler()
UpperCamelCase__: Any = DDIMScheduler(prediction_type="v_prediction" )
UpperCamelCase__: List[str] = self.dummy_vae
UpperCamelCase__: Union[str, Any] = self.dummy_text_encoder
UpperCamelCase__: List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCamelCase__: Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__: Tuple = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
UpperCamelCase__: int = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
UpperCamelCase__: Optional[int] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__: int = "A painting of a squirrel eating a burger"
UpperCamelCase__: str = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCamelCase__: str = output.images
assert image.shape[0] == 2
UpperCamelCase__: Any = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase__: List[Any] = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCamelCase__: Union[str, Any] = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def UpperCAmelCase_ ( self: Optional[Any] ):
'''simple docstring'''
UpperCamelCase__: List[Any] = self.dummy_cond_unet_upscale
UpperCamelCase__: List[Any] = DDPMScheduler()
UpperCamelCase__: str = DDIMScheduler(prediction_type="v_prediction" )
UpperCamelCase__: List[Any] = self.dummy_vae
UpperCamelCase__: Optional[Any] = self.dummy_text_encoder
UpperCamelCase__: Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCamelCase__: int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__: Dict = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
UpperCamelCase__: Dict = unet.half()
UpperCamelCase__: Any = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__: Optional[int] = StableDiffusionUpscalePipeline(
unet=__lowerCamelCase , low_res_scheduler=__lowerCamelCase , scheduler=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , max_noise_level=350 , )
UpperCamelCase__: Optional[int] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__: str = "A painting of a squirrel eating a burger"
UpperCamelCase__: Optional[Any] = torch.manual_seed(0 )
UpperCamelCase__: Optional[int] = sd_pipe(
[prompt] , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=2 , output_type="np" , ).images
UpperCamelCase__: Any = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self: Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
UpperCamelCase__: Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCamelCase__: Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
UpperCamelCase__: str = "stabilityai/stable-diffusion-x4-upscaler"
UpperCamelCase__: Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase__: Dict = "a cat sitting on a park bench"
UpperCamelCase__: Optional[int] = torch.manual_seed(0 )
UpperCamelCase__: Any = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="np" , )
UpperCamelCase__: int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCamelCase__: int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
UpperCamelCase__: Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCamelCase__: int = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing()
UpperCamelCase__: Dict = "a cat sitting on a park bench"
UpperCamelCase__: Optional[int] = torch.manual_seed(0 )
UpperCamelCase__: Optional[Any] = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , output_type="np" , )
UpperCamelCase__: Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCAmelCase_ ( self: Any ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase__: str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
UpperCamelCase__: str = "stabilityai/stable-diffusion-x4-upscaler"
UpperCamelCase__: str = StableDiffusionUpscalePipeline.from_pretrained(
__lowerCamelCase , torch_dtype=torch.floataa , )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCamelCase__: Dict = "a cat sitting on a park bench"
UpperCamelCase__: List[Any] = torch.manual_seed(0 )
UpperCamelCase__: Any = pipe(
prompt=__lowerCamelCase , image=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=5 , output_type="np" , )
UpperCamelCase__: str = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 221 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=DummyObject ):
    _backends = ['keras_nlp']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['keras_nlp'] )
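# How these dummies behave: instantiating the class raises an ImportError through
# requires_backends until the `keras_nlp` backend is installed, e.g.:
# obj = __magic_name__()   # ImportError: ... requires the keras_nlp library ...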
| 72 |
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ) -> Union[str, Any]:
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664_694
        config.cell_selection_preference = 0.207_951
        config.huber_loss_delta = 0.121_194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0_352_513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4_519
        config.cell_selection_preference = 0.903_421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763_141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(f"Task {task} not supported." )
    print(f"Building PyTorch model from configuration: {config}" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}" )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" ,model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("Used relative position embeddings:" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
) | 137 | 0 |
"""simple docstring"""
__UpperCAmelCase = tuple[float, float, float]
__UpperCAmelCase = tuple[float, float, float]
def create_vector ( end_point1 : Pointad , end_point2 : Pointad ) -> Vectorad:
    '''simple docstring'''
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab : Vectorad , ac : Vectorad ) -> Vectorad:
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector ( vector : Vectorad , accuracy : int ) -> bool:
    '''simple docstring'''
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear ( a : Pointad , b : Pointad , c : Pointad , accuracy : int = 1_0 ) -> bool:
    '''simple docstring'''
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
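# Worked example: three points on the line x == y == z are collinear because the
# cross product of the two spanning vectors is the zero vector.
# are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))  -> True
# are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3))  -> False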
| 256 |
"""simple docstring"""
class Node :
    def __init__( self , name , val ):
        self.name = name
        self.val = val
    def __str__( self ):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""
    def __lt__( self , other ):
        return self.val < other.val
class MinHeap :
    def __init__( self , array ):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array )
    def __getitem__( self , key ):
        return self.get_value(key )
    def get_parent_idx( self , idx ):
        return (idx - 1) // 2
    def get_left_child_idx( self , idx ):
        return idx * 2 + 1
    def get_right_child_idx( self , idx ):
        return idx * 2 + 2
    def get_value( self , key ):
        return self.heap_dict[key]
    def build_heap( self , array ):
        last_idx = len(array ) - 1
        start_from = self.get_parent_idx(last_idx )
        for idx, i in enumerate(array ):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from , -1 , -1 ):
            self.sift_down(i , array )
        return array
    def sift_down( self , idx , array ):
        while True:
            l = self.get_left_child_idx(idx )  # noqa: E741
            r = self.get_right_child_idx(idx )
            smallest = idx
            if l < len(array ) and array[l] < array[idx]:
                smallest = l
            if r < len(array ) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break
    def sift_up( self , idx ):
        p = self.get_parent_idx(idx )
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx )
    def peek( self ):
        return self.heap[0]
    def remove( self ):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0 , self.heap )
        return x
    def insert( self , node ):
        self.heap.append(node )
        self.idx_of_element[node] = len(self.heap ) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap ) - 1 )
    def is_empty( self ):
        return len(self.heap ) == 0
    def decrease_key( self , node , new_value ):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node] )
__UpperCAmelCase = Node('R', -1)
__UpperCAmelCase = Node('B', 6)
__UpperCAmelCase = Node('A', 3)
__UpperCAmelCase = Node('X', 1)
__UpperCAmelCase = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
__UpperCAmelCase = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = '''Hello, World!'''
SCREAMING_SNAKE_CASE_ = '''en_XX'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = Path("""data_bin""" )
__lowerCAmelCase = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name="""xmod_base""" , arch="""xmod_base""" , task="""multilingual_masked_lm""" , data_name_or_path=str(_lowerCAmelCase ) , bpe="""sentencepiece""" , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / """sentencepiece.bpe.model""" ) , src_dict=str(data_dir / """dict.txt""" ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
__lowerCAmelCase = xmod.model.encoder.sentence_encoder
__lowerCAmelCase = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , """bottleneck""" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
__lowerCAmelCase = xmod.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
print("""Our X-MOD config:""" , _lowerCAmelCase )
__lowerCAmelCase = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCAmelCase = xmod_sent_encoder.embed_tokens.weight
__lowerCAmelCase = xmod_sent_encoder.embed_positions.weight
__lowerCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
__lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.weight
__lowerCAmelCase = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCAmelCase = model.roberta.encoder.layer[i]
__lowerCAmelCase = xmod_sent_encoder.layers[i]
# self attention
__lowerCAmelCase = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("""Dimensions of self-attention weights do not match.""" )
__lowerCAmelCase = xmod_layer.self_attn.q_proj.weight
__lowerCAmelCase = xmod_layer.self_attn.q_proj.bias
__lowerCAmelCase = xmod_layer.self_attn.k_proj.weight
__lowerCAmelCase = xmod_layer.self_attn.k_proj.bias
__lowerCAmelCase = xmod_layer.self_attn.v_proj.weight
__lowerCAmelCase = xmod_layer.self_attn.v_proj.bias
# self-attention output
__lowerCAmelCase = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("""Dimensions of self-attention output weights do not match.""" )
__lowerCAmelCase = xmod_layer.self_attn.out_proj.weight
__lowerCAmelCase = xmod_layer.self_attn.out_proj.bias
__lowerCAmelCase = xmod_layer.self_attn_layer_norm.weight
__lowerCAmelCase = xmod_layer.self_attn_layer_norm.bias
# intermediate
__lowerCAmelCase = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of intermediate weights do not match.""" )
__lowerCAmelCase = xmod_layer.fca.weight
__lowerCAmelCase = xmod_layer.fca.bias
# output
__lowerCAmelCase = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("""Dimensions of feed-forward weights do not match.""" )
__lowerCAmelCase = xmod_layer.fca.weight
__lowerCAmelCase = xmod_layer.fca.bias
__lowerCAmelCase = xmod_layer.final_layer_norm.weight
__lowerCAmelCase = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__lowerCAmelCase = xmod_layer.adapter_layer_norm.weight
__lowerCAmelCase = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("""Lists of language adapters do not match.""" )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__lowerCAmelCase = bert_output.adapter_modules[lang_code]
__lowerCAmelCase = xmod_layer.adapter_modules[lang_code]
__lowerCAmelCase = from_adapter.fca.weight
__lowerCAmelCase = from_adapter.fca.bias
__lowerCAmelCase = from_adapter.fca.weight
__lowerCAmelCase = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__lowerCAmelCase = xmod_sent_encoder.layer_norm.weight
__lowerCAmelCase = xmod_sent_encoder.layer_norm.bias
if classification_head:
__lowerCAmelCase = xmod.model.classification_heads["""mnli"""].dense.weight
__lowerCAmelCase = xmod.model.classification_heads["""mnli"""].dense.bias
__lowerCAmelCase = xmod.model.classification_heads["""mnli"""].out_proj.weight
__lowerCAmelCase = xmod.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
__lowerCAmelCase = xmod.model.encoder.lm_head.dense.weight
__lowerCAmelCase = xmod.model.encoder.lm_head.dense.bias
__lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.weight
__lowerCAmelCase = xmod.model.encoder.lm_head.layer_norm.bias
__lowerCAmelCase = xmod.model.encoder.lm_head.weight
__lowerCAmelCase = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowerCAmelCase = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
__lowerCAmelCase = model(_lowerCAmelCase )[0]
if classification_head:
__lowerCAmelCase = xmod.model.classification_heads["""mnli"""](xmod.extract_features(_lowerCAmelCase ) )
else:
__lowerCAmelCase = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
__lowerCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
__lowerCAmelCase = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1E-3 )
print("""Do both models output the same tensors?""" , """🔥""" if success else """💩""" )
if not success:
raise Exception("""Something went wRoNg""" )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
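# Example invocation (hypothetical script name and paths; the flags are defined above):
# python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#     --xmod_checkpoint_path ./xmod.base.81.1M/model.pt --pytorch_dump_folder_path ./xmod-base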
| 465 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class EfficientNetConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''efficientnet'''
    def __init__( self , num_channels = 3 , image_size = 600 , width_coefficient = 2.0 , depth_coefficient = 3.1 , depth_divisor = 8 , kernel_sizes = [3, 3, 5, 3, 5, 5, 3] , in_channels = [32, 16, 24, 40, 80, 112, 192] , out_channels = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding = [] , strides = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats = [1, 2, 2, 3, 3, 4, 1] , expand_ratios = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio = 0.25 , hidden_act = "swish" , hidden_dim = 2_560 , pooling_type = "mean" , initializer_range = 0.02 , batch_norm_eps = 0.001 , batch_norm_momentum = 0.99 , dropout_rate = 0.5 , drop_connect_rate = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig ( OnnxConfig ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-5
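# A minimal usage sketch (constructor defaults as defined above):
# config = EfficientNetConfig(image_size=224, hidden_dim=1_280)
# config.num_hidden_layers  # 64, i.e. sum of the default num_block_repeats times 4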
| 465 | 1 |
'''simple docstring'''
def multiplication_table ( number : int , number_of_terms : int ) -> str:
    return "\n".join(
        f'{number} * {i} = {number * i}' for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10)) | 493 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case : List[Any] = logging.get_logger(__name__)
class DPTFeatureExtractor ( DPTImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt' , type=str , default='microsoft/unixcoder-base-nine' )
    parser.add_argument('--num_epochs' , type=int , default=5 )
    parser.add_argument('--batch_size' , type=int , default=6 )
    parser.add_argument('--gradient_accumulation_steps' , type=int , default=1 )
    parser.add_argument('--freeze' , type=bool , default=True )
    parser.add_argument('--learning_rate' , type=float , default=5e-4 )
    parser.add_argument('--seed' , type=int , default=0 )
    parser.add_argument('--lr_scheduler_type' , type=str , default='cosine' )
    parser.add_argument('--num_warmup_steps' , type=int , default=10 )
    parser.add_argument('--weight_decay' , type=float , default=0.01 )
    parser.add_argument('--output_dir' , type=str , default='./results' )
    return parser.parse_args()
metric = load('''accuracy''')
def compute_metrics( eval_pred ):
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
class CustomCallback ( TrainerCallback ):
    '''simple docstring'''
    def __init__( self , trainer ) -> None:
        super().__init__()
        self._trainer = trainer
    def on_epoch_end( self , args , state , control , **kwargs ):
        # also evaluate on the training split whenever an evaluation is scheduled
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset ,metric_key_prefix='train' )
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset('codeparrot/codecomplex' , split='train' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['test'].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        } )
    print('Loading tokenizer and model' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )
    def tokenize( example ):
        inputs = tokenizer(example['src'] , truncation=True , max_length=1024 )
        label = labels.str2int(example['complexity'] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['train'].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    print('Training...' )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
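# Example invocation (hypothetical script name; the flags are defined in get_args above):
# python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#     --num_epochs 5 --batch_size 6 --output_dir ./results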
if __name__ == "__main__":
main() | 91 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction( train_dt , train_usr , train_mtch , test_dt , test_mtch ) -> float:
    """simple docstring"""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
    y = np.array(train_usr )
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def sarimax_predictor( train_user , train_match , test_match ) -> float:
    """simple docstring"""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
    model_fit = model.fit(disp=False , maxiter=6_00 , method="""nm""" )
    result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
    return result[0]
def support_vector_regressor( x_train , x_test , train_user ) -> float:
    """simple docstring"""
    regressor = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
    regressor.fit(x_train , train_user )
    y_pred = regressor.predict(x_test )
    return y_pred[0]
def interquartile_range_checker( train_user ) -> float:
    """simple docstring"""
    train_user.sort()
    q1 = np.percentile(train_user , 25 )
    q3 = np.percentile(train_user , 75 )
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker( list_vote , actual_result ) -> bool:
    """simple docstring"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
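# Worked example for the voting check: two of the three forecasts sit within 0.1 of the
# actual value, so the data is judged safe.
# data_safety_checker([5.0, 5.05, 4.98], 5.0)  -> True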
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    print(f"Today's data is {not_str}safe.")
| 591 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( A_ , unittest.TestCase):
_UpperCAmelCase : Optional[Any] = OpenAIGPTTokenizer
_UpperCAmelCase : Optional[Any] = OpenAIGPTTokenizerFast
_UpperCAmelCase : str = True
_UpperCAmelCase : Any = False
def _UpperCAmelCase ( self : Tuple ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
UpperCAmelCase = dict(zip(__SCREAMING_SNAKE_CASE ,range(len(__SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
UpperCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) )
with open(self.merges_file ,"w" ) as fp:
fp.write("\n".join(__SCREAMING_SNAKE_CASE ) )
def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : Any ):
return "lower newer", "lower newer"
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = OpenAIGPTTokenizer(self.vocab_file ,self.merges_file )
UpperCAmelCase = """lower"""
UpperCAmelCase = ["""low""", """er</w>"""]
UpperCAmelCase = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = tokens + ["""<unk>"""]
UpperCAmelCase = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) ,__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[Any] ,__SCREAMING_SNAKE_CASE : int=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
# Simple input
UpperCAmelCase = """This is a simple input"""
UpperCAmelCase = ["""This is a simple input 1""", """This is a simple input 2"""]
UpperCAmelCase = ("""This is a simple input""", """This is a pair""")
UpperCAmelCase = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding="max_length" )
# Simple input
self.assertRaises(__SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding="max_length" )
# Simple input
self.assertRaises(
__SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding="max_length" ,)
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE ,tokenizer_r.encode ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding="max_length" )
# Pair input
self.assertRaises(__SCREAMING_SNAKE_CASE ,tokenizer_r.encode_plus ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding="max_length" )
# Pair input
self.assertRaises(
__SCREAMING_SNAKE_CASE ,tokenizer_r.batch_encode_plus ,__SCREAMING_SNAKE_CASE ,max_length=__SCREAMING_SNAKE_CASE ,padding="max_length" ,)
def _UpperCAmelCase ( self : Tuple ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class __magic_name__ ( A_):
pass
| 708 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowerCAmelCase =pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
inspect_dataset(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase = path + ".py"
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
inspect_metric(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase = path + ".py"
assert script_name in os.listdir(_lowerCAmelCase )
assert "__pycache__" not in os.listdir(_lowerCAmelCase )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
with pytest.raises(_lowerCAmelCase ):
get_dataset_config_info(_lowerCAmelCase , config_name=_lowerCAmelCase )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ):
"""simple docstring"""
UpperCAmelCase = get_dataset_config_names(_lowerCAmelCase )
assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_with_config(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
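# A minimal interactive sketch of the inspected APIs (hedged: requires network
# access; dataset names and results are taken from the parametrize cases above):
# from datasets import get_dataset_split_names
# get_dataset_split_names("squad", config_name="plain_text")  # -> ['train', 'validation']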
| 405 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be multiplied together
    until a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be summed
    until a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
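# Worked examples (values checked by hand):
# multiplicative_persistence(217) -> 2   # 2*1*7 = 14, then 1*4 = 4
# additive_persistence(199) -> 3         # 1+9+9 = 19, then 1+9 = 10, then 1+0 = 1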
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318 |
import argparse
import copy
def generate_neighbours(path):
    """Parse the input file into an adjacency dict mapping each node to a list
    of [neighbour, distance] pairs."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour starting tour and return it together
    with its total distance."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate the two-swap neighbourhood of ``solution``; each neighbour
    carries its total distance as its last element, and the list is sorted by
    that distance."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for ``iters`` iterations with a tabu list of length ``size``."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"""-f""",
"""--File""",
type=str,
help="""Path to the file containing the data""",
required=True,
)
parser.add_argument(
"""-i""",
"""--Iterations""",
type=int,
help="""How many iterations the algorithm should perform""",
required=True,
)
parser.add_argument(
"""-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
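# Hedged invocation sketch (the data format is implied by generate_neighbours:
# one undirected edge per line, written as "<node> <node> <distance>"):
#   python tabu_search.py -f edges.txt -i 100 -s 5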
| 318 | 1 |
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Forward ``images`` to the image processor and ``audio`` to the
        feature extractor, merging the resulting dictionaries."""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
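# Hedged usage sketch (the checkpoint name is an assumption; any repo bundling
# a TvltImageProcessor and TvltFeatureExtractor config works the same way):
# processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
# inputs = processor(images=video_frames, audio=audio_array, sampling_rate=44100)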
| 719 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the unique paths from the top-left to the bottom-right cell of
    ``grid``, moving in the four cardinal directions without revisiting a cell;
    a cell containing 1 is an obstacle."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
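# Illustrative call: a 3x3 grid with a single central obstacle leaves exactly
# two paths from (0, 0) to (2, 2), one along each open border:
# depth_first_search([[0, 0, 0], [0, 1, 0], [0, 0, 0]], 0, 0, set())  # -> 2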
if __name__ == "__main__":
import doctest
doctest.testmod()
| 508 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    """Stand-in for a real CUDA OOM error."""
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
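# Hedged sketch of the decorator's intended use in real training code (the
# body is illustrative, not part of this test file):
# @find_executable_batch_size(starting_batch_size=128)
# def training_loop(batch_size):
#     ...  # build dataloaders with `batch_size`; an OOM error triggers a halved retry
# training_loop()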
| 22 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
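# Hedged end-to-end sketch matching the integration test below (the checkpoint
# name is real; note LiLT checkpoints reuse the tokenizer of their base text
# model, an assumption not exercised by this file):
# model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
# outputs = model(input_ids=torch.tensor([[1, 2]]),
#                 bbox=torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]]))
# outputs.last_hidden_state.shape  # -> torch.Size([1, 2, 768])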
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 32 | 0 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor):
    """Convert a diffuser temporal U-Net checkpoint into the diffusers format."""
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65_536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    """Convert the diffuser value-function checkpoint into the diffusers format."""
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65_536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }

    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")

    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)

    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
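# Hedged follow-up (paths taken from the save calls above): a converted
# checkpoint can be reloaded with
#   UNet1DModel.from_pretrained("hub/hopper-medium-v2/unet/hor32")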
| 703 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]
    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)
    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]
    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples
    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
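# Hedged launch sketch (flag names taken from the dataclass fields above; the
# dataset and output path are illustrative):
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75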
| 485 | 0 |